/*
 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

#ifndef PATCHED_ADDR
#define PATCHED_ADDR  (max_jint)
#endif

void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}



//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3
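//
// Typical usage, sketched from move_to_phi() later in this file (illustrative,
// not part of the original comment): build a resolver, record each required
// phi move, and let the destructor order the moves and break cycles when the
// resolver goes out of scope:
//
//   PhiResolver resolver(this, _virtual_register_number + max_phis * 2);
//   resolver.move(operand_for_instruction(cur_val), operand_for_instruction(phi));
//   // moves are emitted, cycle-free, in ~PhiResolver()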

PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}


// Traverse assignment graph in depth first order and generate moves in post order
// i.e. two assignments: b := c, a := b start with node c:
// Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
// Generates moves in this order: move b to a and move c to b
// i.e. cycle a := b, b := a start with node a
// Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
// Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}


PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}


ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}



//--------------------------------------------------------------


void LIRGenerator::init() {
  _bs = Universe::heap()->barrier_set();
#ifdef MIPS
  assert(_bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
  //_card_table_base = new LIR_Const((intptr_t)ct->byte_map_base);
  _card_table_base = new LIR_Const(ct->byte_map_base);
#endif
}


void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}


void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}


void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}


//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be root;
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}


// This is called for each node in the tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when we encounter a root
  if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != NULL, "state must be defined");

#ifndef PRODUCT
  state->verify();
#endif

  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}


void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
  /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if TieredCompilation
   * is active and the class hasn't yet been resolved we need to emit a patch that resolves
   * the class. */
  if ((TieredCompilation && need_resolve) || !obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ klass2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ metadata2reg(obj->constant_encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
  if (index->is_constant()) {
#ifndef MIPS
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
#else
    LIR_Opr left = LIR_OprFact::address(new LIR_Address(array, arrayOopDesc::length_offset_in_bytes(), T_INT));
    LIR_Opr right = LIR_OprFact::intConst(index->as_jint());
    __ null_check_for_branch(lir_cond_belowEqual, left, right, null_check_info);
    __ branch(lir_cond_belowEqual, left, right, T_INT, stub); // forward branch
#endif
  } else {
#ifndef MIPS
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
#else
    LIR_Opr left = index;
    LIR_Opr right = LIR_OprFact::address(new LIR_Address(array, arrayOopDesc::length_offset_in_bytes(), T_INT));
    __ null_check_for_branch(lir_cond_aboveEqual, left, right, null_check_info);
    __ branch(lir_cond_aboveEqual, left, right, T_INT, stub); // forward branch
#endif
  }
}


void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
#ifndef MIPS
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
#else
    LIR_Opr left = LIR_OprFact::address(new LIR_Address(buffer, java_nio_Buffer::limit_offset(), T_INT));
    LIR_Opr right = LIR_OprFact::intConst(index->as_jint());
    __ null_check_for_branch(lir_cond_belowEqual, left, right, info);
    __ branch(lir_cond_belowEqual, left, right, T_INT, stub); // forward branch
#endif
  } else {
#ifndef MIPS
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
#else
    LIR_Opr left = index;
    LIR_Opr right = LIR_OprFact::address(new LIR_Address(buffer, java_nio_Buffer::limit_offset(), T_INT));
    __ null_check_for_branch(lir_cond_aboveEqual, left, right, info);
    __ branch(lir_cond_aboveEqual, left, right, T_INT, stub); // forward branch
#endif
  }
  __ move(index, result);
}



void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd: __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul: __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          int c = right->as_jint();
          if (is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;
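
    // Illustrative note (not in the original source): for "x * 8" the
    // constant is a power of two, so the _imul case above emits
    // shift_left(x, 3) instead of a multiply; other constants may still be
    // strength-reduced by strength_reduce_multiply().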

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        if (is_strictfp) {
          __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div (left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
    case Bytecodes::_ishl:
    case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
    case Bytecodes::_ishr:
    case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
    case Bytecodes::_iushr:
    case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
    default: ShouldNotReachHere();
  }
}


void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:  __ logical_or(left_op, right_op, result_op); break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}

#ifndef PRODUCT
void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
  if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
  } else if (PrintNotLoaded && (TieredCompilation && new_instance->is_unresolved())) {
    tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
  }
}
#endif

#ifndef MIPS
void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
#else
void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3,
                                LIR_Opr scratch4, LIR_Opr scratch5, LIR_Opr scratch6, LIR_Opr klass_reg, CodeEmitInfo* info) {
#endif
  klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
#ifndef MIPS
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
#else
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4, scratch5, scratch6,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);

#endif
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
#ifndef MIPS
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
#else
    __ branch(lir_cond_always, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
#endif
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

static ciType* phi_declared_type(Phi* phi) {
  ciType* t = phi->operand_at(0)->declared_type();
  if (t == NULL) {
    return NULL;
  }
  for(int i = 1; i < phi->operand_count(); i++) {
    if (t != phi->operand_at(i)->declared_type()) {
      return NULL;
    }
  }
  return t;
}

void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }

    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != NULL) {
    Value length_limit = NULL;

    IfOp* ifop = length->as_IfOp();
    if (ifop != NULL) {
      // look for expressions like min(v, a.length) which ends up as
      //     x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }
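
    // Illustrative note (not in the original source): the Java expression
    // "int n = (len > a.length) ? a.length : len" reaches here as an IfOp of
    // exactly this shape (cond == gtr, x == fval == len, y == tval == a.length),
    // so a.length is recognized as the copy's length limit.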

    // try to skip null checks and range checks
    NewArray* src_array = src->as_NewArray();
    if (src_array != NULL) {
      flags &= ~LIR_OpArrayCopy::src_null_check;
      if (length_limit != NULL &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }

    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != NULL) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != NULL &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    if (src_int->value() >= dst_int->value()) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != NULL) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    // src and dest positions are the same, or dst is zero so assume
    // nonoverlapping copy.
    flags &= ~LIR_OpArrayCopy::overlapping;
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}


LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()],
         err_msg_res("size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type())));
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}

#ifndef MIPS
void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);

    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}
#else
void LIRGenerator::profile_branch(If* if_instr, If::Condition cond, LIR_Opr left, LIR_Opr right) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);
    LIR_Opr data_offset_reg = new_pointer_register();

    LIR_Opr opr1 = LIR_OprFact::intptrConst(taken_count_offset);
    LIR_Opr opr2 = LIR_OprFact::intptrConst(not_taken_count_offset);
    LabelObj* skip = new LabelObj();

    __ cmove_mips(lir_cond(cond), left, right, opr1, opr2, data_offset_reg, as_BasicType(if_instr->x()->type()));

    LIR_Opr data_reg = new_pointer_register();
    LIR_Opr tmp_reg  = new_pointer_register();
    __ move(data_offset_reg, tmp_reg);
    __ add(tmp_reg, md_reg, tmp_reg);
    LIR_Address* data_addr = new LIR_Address(tmp_reg, 0, data_reg->type());
    __ move(data_addr, data_reg);
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    // Use leal instead of add to avoid destroying condition codes on x86
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}

#endif

// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated from Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double..) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At the exit of a basic block, we fill the register(s) and spill slots.
// At the entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.
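//
// Illustrative example (not in the original comment): for a join block B3
// with predecessors B1 and B2 and a phi 'p' merging v1 (from B1) and v2
// (from B2), move_to_phi() below emits "move v1 -> operand(p)" at the end of
// B1 and "move v2 -> operand(p)" at the end of B2, using PhiResolver to
// order the moves and break any cycles between phis.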


// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    LIR_Opr operand = cur_val->operand();
    if (cur_val->operand()->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}


LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically. This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  return LIR_OprFact::virtual_register(vreg, type);
}


// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }

  set_result(x, reg);
  return reg;
}


//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}
#ifdef MIPS
void LIRGenerator::write_barrier(LIR_Opr addr) {
  if (addr->is_address()) {
    LIR_Address* address = (LIR_Address*)addr;
    LIR_Opr ptr = new_register(T_OBJECT);
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  }
  if (can_inline_as_constant(card_table_base())) {
    __ move(LIR_OprFact::intConst(0), new LIR_Address(tmp, card_table_base()->as_jint(), T_BYTE));
  } else {
    __ add(tmp, load_constant(card_table_base()), tmp);
    __ move(LIR_OprFact::intConst(0), new LIR_Address(tmp, 0, T_BYTE));
  }
}
#endif
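
// Illustrative note (not in the original source): with HotSpot's 512-byte
// cards, CardTableModRefBS::card_shift is 9, so a store through an address
// like 0x20001234 dirties the byte at byte_map_base + (0x20001234 >> 9);
// write_barrier() above computes that card address and stores 0 (dirty)
// into it.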


void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}


//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block. After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}


void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    signature.append(T_METADATA); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}

// Example: ref.get()
// Combination of LoadField and g1 pre-write barrier
void LIRGenerator::do_Reference_get(Intrinsic* x) {

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Address* referent_field_adr =
    new LIR_Address(reference.result(), referent_offset, T_OBJECT);

  LIR_Opr result = rlock_result(x);

  __ load(referent_field_adr, result, info);

  // Register the value in the referent field with the pre-barrier
  pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
              result /* pre_val */,
              false  /* do_load */,
              false  /* patch */,
              NULL   /* info */);
}

// Example: clazz.isInstance(object)
void LIRGenerator::do_isInstance(Intrinsic* x) {
  assert(x->number_of_arguments() == 2, "wrong type");

  // TODO could try to substitute this node with an equivalent InstanceOf
  // if clazz is known to be a constant Class. This will pick up newly found
  // constants after HIR construction. I'll leave this to a future change.

  // as a first cut, make a simple leaf call to runtime to stay platform independent.
  // could follow the aastore example in a future change.

  LIRItem clazz(x->argument_at(0), this);
  LIRItem object(x->argument_at(1), this);
  clazz.load_item();
  object.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform null check on clazz
  if (x->needs_null_check()) {
    CodeEmitInfo* info = state_for(x);
    __ null_check(clazz.result(), info);
  }

  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
                                     x->type(),
                                     NULL); // NULL CodeEmitInfo results in a leaf call
  __ move(call_result, result);
}

// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr temp = new_register(T_METADATA);
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
  // meaning of these two is mixed up (see JDK-8026837).
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
  __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
}


// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}


void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}


//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}


Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}


void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}


// Block local constant handling. This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers. Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}
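
// Illustrative example (not in the original source): if a block loads the
// int constant 42 three times, the first load_constant() call allocates a
// fresh virtual register and emits one materializing move; the next two
// calls find the matching T_INT entry in _constants and return the same
// register, so the constant is materialized only once per block.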

// Various barriers

void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                               bool do_load, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      // No pre barriers
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No pre barriers
      break;
    default:
      ShouldNotReachHere();

  }
}

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_post_barrier(addr, new_val);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr, new_val);
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No post barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                                                     bool do_load, bool patch, CodeEmitInfo* info) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    flag_type = T_BYTE;
  }
  LIR_Opr thrd = getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(JavaThread::satb_mark_queue_offset() +
                             PtrQueue::byte_offset_of_active()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  // MIPS does not support a separate cmp instruction.
#ifndef MIPS
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
#endif

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == NULL, "sanity");

    slow = new G1PreBarrierStub(pre_val);
  }

#ifndef MIPS
  __ branch(lir_cond_notEqual, T_INT, slow);
#else
  __ branch(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0), T_INT, slow);
#endif
  __ branch_destination(slow->continuation());
}
1653 void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1654 // If the "new_val" is a constant NULL, no barrier is necessary.
1655 if (new_val->is_constant() &&
1656 new_val->as_constant_ptr()->as_jobject() == NULL) return;
1658 if (!new_val->is_register()) {
1659 LIR_Opr new_val_reg = new_register(T_OBJECT);
1660 if (new_val->is_constant()) {
1661 __ move(new_val, new_val_reg);
1662 } else {
1663 __ leal(new_val, new_val_reg);
1664 }
1665 new_val = new_val_reg;
1666 }
1667 assert(new_val->is_register(), "must be a register at this point");
1669 if (addr->is_address()) {
1670 LIR_Address* address = addr->as_address_ptr();
1671 LIR_Opr ptr = new_pointer_register();
1672 if (!address->index()->is_valid() && address->disp() == 0) {
1673 __ move(address->base(), ptr);
1674 } else {
1675 assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1676 __ leal(addr, ptr);
1677 }
1678 addr = ptr;
1679 }
1680 assert(addr->is_register(), "must be a register at this point");
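// Cross-region filter: a G1 post-barrier is only needed when addr and
// new_val lie in different heap regions, i.e. when
//   ((addr ^ new_val) >> LogOfHRGrainBytes) != 0
// which is exactly what the xor/shift sequence below computes.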
1682 LIR_Opr xor_res = new_pointer_register();
1683 LIR_Opr xor_shift_res = new_pointer_register();
1684 if (TwoOperandLIRForm) {
1685 __ move(addr, xor_res);
1686 __ logical_xor(xor_res, new_val, xor_res);
1687 __ move(xor_res, xor_shift_res);
1688 __ unsigned_shift_right(xor_shift_res,
1689 LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
1690 xor_shift_res,
1691 LIR_OprDesc::illegalOpr());
1692 } else {
1693 __ logical_xor(addr, new_val, xor_res);
1694 __ unsigned_shift_right(xor_res,
1695 LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
1696 xor_shift_res,
1697 LIR_OprDesc::illegalOpr());
1698 }
1700 if (!new_val->is_register()) {
1701 LIR_Opr new_val_reg = new_register(T_OBJECT);
1702 __ leal(new_val, new_val_reg);
1703 new_val = new_val_reg;
1704 }
1705 assert(new_val->is_register(), "must be a register at this point");
1707 #ifndef MIPS
1708 __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
1710 #endif
1711 CodeStub* slow = new G1PostBarrierStub(addr, new_val);
1712 #ifndef MIPS
1713 __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
1714 #else
1715 __ branch(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst((intptr_t)NULL_WORD), LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
1716 #endif
1717 __ branch_destination(slow->continuation());
1718 }
1720 #endif // INCLUDE_ALL_GCS
1721 ////////////////////////////////////////////////////////////////////////
1723 void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1725 assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
1726 LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
1727 if (addr->is_address()) {
1728 LIR_Address* address = addr->as_address_ptr();
1729 // ptr cannot be an object because we use this barrier for array card marks
1730 // and addr can point in the middle of an array.
1731 LIR_Opr ptr = new_pointer_register();
1732 if (!address->index()->is_valid() && address->disp() == 0) {
1733 __ move(address->base(), ptr);
1734 } else {
1735 assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1736 __ leal(addr, ptr);
1737 }
1738 addr = ptr;
1739 }
1740 assert(addr->is_register(), "must be a register at this point");
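// On the generic path below, the card covering addr is dirtied by storing
// 0 at byte_map_base + (addr >> card_shift).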
1742 #ifdef CARDTABLEMODREF_POST_BARRIER_HELPER
1743 CardTableModRef_post_barrier_helper(addr, card_table_base);
1744 #else
1745 LIR_Opr tmp = new_pointer_register();
1746 if (TwoOperandLIRForm) {
1747 __ move(addr, tmp);
1748 __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
1749 } else {
1750 __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
1751 }
1752 if (can_inline_as_constant(card_table_base)) {
1753 __ move(LIR_OprFact::intConst(0),
1754 new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
1755 } else {
1756 #ifndef MIPS
1757 __ move(LIR_OprFact::intConst(0),
1758 new LIR_Address(tmp, load_constant(card_table_base),
1759 T_BYTE));
1760 #else
1761 __ add(tmp, load_constant(card_table_base), tmp);
1762 __ move(LIR_OprFact::intConst(0),
1763 new LIR_Address(tmp, 0,
1764 T_BYTE));
1765 #endif
1766 }
1767 #endif
1768 }
1771 //------------------------field access--------------------------------------
1773 // Comment copied from templateTable_i486.cpp
1774 // ----------------------------------------------------------------------------
1775 // Volatile variables demand their effects be made known to all CPUs in
1776 // order. Store buffers on most chips allow reads & writes to reorder; the
1777 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1778 // memory barrier (i.e., it's not sufficient that the interpreter does not
1779 // reorder volatile references, the hardware also must not reorder them).
1780 //
1781 // According to the new Java Memory Model (JMM):
1782 // (1) All volatiles are serialized with respect to each other.
1783 // ALSO reads & writes act as acquire & release, so:
1783 // ALSO reads & writes act as aquire & release, so:
1784 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1785 // the read float up to before the read. It's OK for non-volatile memory refs
1786 // that happen before the volatile read to float down below it.
1787 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1788 // that happen BEFORE the write float down to after the write. It's OK for
1789 // non-volatile memory refs that happen after the volatile write to float up
1790 // before it.
1791 //
1792 // We only put in barriers around volatile refs (they are expensive), not
1793 // _between_ memory refs (that would require us to track the flavor of the
1794 // previous memory refs). Requirements (2) and (3) require some barriers
1795 // before volatile stores and after volatile loads. These nearly cover
1796 // requirement (1) but miss the volatile-store-volatile-load case. This final
1797 // case is placed after volatile-stores although it could just as well go
1798 // before volatile-loads.
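// Concretely, when os::is_MP(), the code below brackets volatile accesses
// roughly as follows:
//
//   volatile store:  membar_release(); store(value, addr); membar();
//   volatile load:   load(addr, reg);  membar_acquire();
//
// The trailing full membar after the store is what covers the
// volatile-store-volatile-load case mentioned above.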
1801 void LIRGenerator::do_StoreField(StoreField* x) {
1802 bool needs_patching = x->needs_patching();
1803 bool is_volatile = x->field()->is_volatile();
1804 BasicType field_type = x->field_type();
1805 bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
1807 CodeEmitInfo* info = NULL;
1808 if (needs_patching) {
1809 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1810 info = state_for(x, x->state_before());
1811 } else if (x->needs_null_check()) {
1812 NullCheck* nc = x->explicit_null_check();
1813 if (nc == NULL) {
1814 info = state_for(x);
1815 } else {
1816 info = state_for(nc);
1817 }
1818 }
1821 LIRItem object(x->obj(), this);
1822 LIRItem value(x->value(), this);
1824 object.load_item();
1826 if (is_volatile || needs_patching) {
1827 // load item if field is volatile (fewer special cases for volatiles)
1828 // load item if field not initialized
1829 // load item if field not constant
1830 // because of code patching we cannot inline constants
1831 if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1832 value.load_byte_item();
1833 } else {
1834 value.load_item();
1835 }
1836 } else {
1837 value.load_for_store(field_type);
1838 }
1840 set_no_result(x);
1842 #ifndef PRODUCT
1843 if (PrintNotLoaded && needs_patching) {
1844 tty->print_cr(" ###class not loaded at store_%s bci %d",
1845 x->is_static() ? "static" : "field", x->printable_bci());
1846 }
1847 #endif
1849 if (x->needs_null_check() &&
1850 (needs_patching ||
1851 MacroAssembler::needs_explicit_null_check(x->offset()))) {
1852 // Emit an explicit null check because the offset is too large.
1853 // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1854 // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1855 __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1856 }
1858 LIR_Address* address;
1859 if (needs_patching) {
1860 // we need to patch the offset in the instruction so don't allow
1861 // generate_address to try to be smart about emitting the -1.
1862 // Otherwise the patching code won't know how to find the
1863 // instruction to patch.
1864 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1865 } else {
1866 address = generate_address(object.result(), x->offset(), field_type);
1867 }
1869 if (is_volatile && os::is_MP()) {
1870 __ membar_release();
1871 }
1873 if (is_oop) {
1874 // Do the pre-write barrier, if any.
1875 pre_barrier(LIR_OprFact::address(address),
1876 LIR_OprFact::illegalOpr /* pre_val */,
1877 true /* do_load*/,
1878 needs_patching,
1879 (info ? new CodeEmitInfo(info) : NULL));
1880 }
1882 if (is_volatile && !needs_patching) {
1883 volatile_field_store(value.result(), address, info);
1884 } else {
1885 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1886 __ store(value.result(), address, info, patch_code);
1887 }
1889 if (is_oop) {
1890 // An oop was stored into the object, so mark the card containing its header
1891 post_barrier(object.result(), value.result());
1892 }
1894 if (is_volatile && os::is_MP()) {
1895 __ membar();
1896 }
1897 }
1900 void LIRGenerator::do_LoadField(LoadField* x) {
1901 bool needs_patching = x->needs_patching();
1902 bool is_volatile = x->field()->is_volatile();
1903 BasicType field_type = x->field_type();
1905 CodeEmitInfo* info = NULL;
1906 if (needs_patching) {
1907 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1908 info = state_for(x, x->state_before());
1909 } else if (x->needs_null_check()) {
1910 NullCheck* nc = x->explicit_null_check();
1911 if (nc == NULL) {
1912 info = state_for(x);
1913 } else {
1914 info = state_for(nc);
1915 }
1916 }
1918 LIRItem object(x->obj(), this);
1920 object.load_item();
1922 #ifndef PRODUCT
1923 if (PrintNotLoaded && needs_patching) {
1924 tty->print_cr(" ###class not loaded at load_%s bci %d",
1925 x->is_static() ? "static" : "field", x->printable_bci());
1926 }
1927 #endif
1929 bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1930 if (x->needs_null_check() &&
1931 (needs_patching ||
1932 MacroAssembler::needs_explicit_null_check(x->offset()) ||
1933 stress_deopt)) {
1934 LIR_Opr obj = object.result();
1935 if (stress_deopt) {
1936 obj = new_register(T_OBJECT);
1937 __ move(LIR_OprFact::oopConst(NULL), obj);
1938 }
1939 // Emit an explicit null check because the offset is too large.
1940 // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1941 // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1942 __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1943 }
1945 LIR_Opr reg = rlock_result(x, field_type);
1946 LIR_Address* address;
1947 if (needs_patching) {
1948 // we need to patch the offset in the instruction so don't allow
1949 // generate_address to try to be smart about emitting the -1.
1950 // Otherwise the patching code won't know how to find the
1951 // instruction to patch.
1952 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1953 } else {
1954 address = generate_address(object.result(), x->offset(), field_type);
1955 }
1957 if (is_volatile && !needs_patching) {
1958 volatile_field_load(address, reg, info);
1959 } else {
1960 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1961 __ load(address, reg, info, patch_code);
1962 }
1964 if (is_volatile && os::is_MP()) {
1965 __ membar_acquire();
1966 }
1967 }
1970 //------------------------java.nio.Buffer.checkIndex------------------------
1972 // int java.nio.Buffer.checkIndex(int)
1973 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1974 // NOTE: by the time we are in checkIndex() we are guaranteed that
1975 // the buffer is non-null (because checkIndex is package-private and
1976 // only called from within other methods in the buffer).
1977 assert(x->number_of_arguments() == 2, "wrong type");
1978 LIRItem buf (x->argument_at(0), this);
1979 LIRItem index(x->argument_at(1), this);
1980 buf.load_item();
1981 index.load_item();
1983 LIR_Opr result = rlock_result(x);
1984 if (GenerateRangeChecks) {
1985 CodeEmitInfo* info = state_for(x);
1986 CodeStub* stub = new RangeCheckStub(info, index.result(), true);
1987 if (index.result()->is_constant()) {
1988 #ifndef MIPS
1989 cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1990 __ branch(lir_cond_belowEqual, T_INT, stub);
1991 #else
1992 LIR_Opr left = LIR_OprFact::address(new LIR_Address(buf.result(),
1993 java_nio_Buffer::limit_offset(), T_INT));
1994 LIR_Opr right = LIR_OprFact::intConst(index.result()->as_jint());
1995 __ null_check_for_branch(lir_cond_belowEqual, left, right, info);
1996 __ branch(lir_cond_belowEqual, left, right, T_INT, stub); // forward branch
1998 #endif
1999 } else {
2000 #ifndef MIPS
2001 cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
2002 java_nio_Buffer::limit_offset(), T_INT, info);
2003 __ branch(lir_cond_aboveEqual, T_INT, stub);
2004 #else
2005 LIR_Opr right = LIR_OprFact::address(new LIR_Address(buf.result(), java_nio_Buffer::limit_offset(), T_INT));
2006 LIR_Opr left = index.result();
2007 __ null_check_for_branch(lir_cond_aboveEqual, left, right, info);
2008 __ branch(lir_cond_aboveEqual, left, right, T_INT, stub); // forward branch
2009 #endif
2010 }
2011 __ move(index.result(), result);
2012 } else {
2013 // Just load the index into the result register
2014 __ move(index.result(), result);
2015 }
2016 }
2019 //------------------------array access--------------------------------------
2022 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
2023 LIRItem array(x->array(), this);
2024 array.load_item();
2025 LIR_Opr reg = rlock_result(x);
2027 CodeEmitInfo* info = NULL;
2028 if (x->needs_null_check()) {
2029 NullCheck* nc = x->explicit_null_check();
2030 if (nc == NULL) {
2031 info = state_for(x);
2032 } else {
2033 info = state_for(nc);
2034 }
2035 if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
2036 LIR_Opr obj = new_register(T_OBJECT);
2037 __ move(LIR_OprFact::oopConst(NULL), obj);
2038 __ null_check(obj, new CodeEmitInfo(info));
2039 }
2040 }
2041 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
2042 }
2045 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
2046 bool use_length = x->length() != NULL;
2047 LIRItem array(x->array(), this);
2048 LIRItem index(x->index(), this);
2049 LIRItem length(this);
2050 bool needs_range_check = x->compute_needs_range_check();
2052 if (use_length && needs_range_check) {
2053 length.set_instruction(x->length());
2054 length.load_item();
2055 }
2057 array.load_item();
2058 if (index.is_constant() && can_inline_as_constant(x->index())) {
2059 // let it be a constant
2060 index.dont_load_item();
2061 } else {
2062 index.load_item();
2063 }
2065 CodeEmitInfo* range_check_info = state_for(x);
2066 CodeEmitInfo* null_check_info = NULL;
2067 if (x->needs_null_check()) {
2068 NullCheck* nc = x->explicit_null_check();
2069 if (nc != NULL) {
2070 null_check_info = state_for(nc);
2071 } else {
2072 null_check_info = range_check_info;
2073 }
2074 if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
2075 LIR_Opr obj = new_register(T_OBJECT);
2076 __ move(LIR_OprFact::oopConst(NULL), obj);
2077 __ null_check(obj, new CodeEmitInfo(null_check_info));
2078 }
2079 }
2081 // emit array address setup early so it schedules better
2082 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
2084 if (GenerateRangeChecks && needs_range_check) {
2085 if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
2086 #ifndef MIPS
2087 __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
2088 #else
2089 tty->print_cr("LIRGenerator::do_LoadIndexed(LoadIndexed* x) not implemented yet!");
2090 Unimplemented();
2091 #endif
2092 } else if (use_length) {
2093 // TODO: use a (modified) version of array_range_check that does not require a
2094 // constant length to be loaded to a register
2095 #ifndef MIPS
2096 __ cmp(lir_cond_belowEqual, length.result(), index.result());
2097 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
2098 #else
2099 __ branch(lir_cond_belowEqual, length.result(), index.result(),T_INT, new RangeCheckStub(range_check_info, index.result()));
2100 #endif
2101 } else {
2102 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
2103 // The range check performs the null check, so clear it out for the load
2104 null_check_info = NULL;
2105 }
2106 }
2108 __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
2109 }
2112 void LIRGenerator::do_NullCheck(NullCheck* x) {
2113 if (x->can_trap()) {
2114 LIRItem value(x->obj(), this);
2115 value.load_item();
2116 CodeEmitInfo* info = state_for(x);
2117 __ null_check(value.result(), info);
2118 }
2119 }
2122 void LIRGenerator::do_TypeCast(TypeCast* x) {
2123 LIRItem value(x->obj(), this);
2124 value.load_item();
2125 // the result is the same as from the node we are casting
2126 set_result(x, value.result());
2127 }
2130 void LIRGenerator::do_Throw(Throw* x) {
2131 LIRItem exception(x->exception(), this);
2132 exception.load_item();
2133 set_no_result(x);
2134 LIR_Opr exception_opr = exception.result();
2135 CodeEmitInfo* info = state_for(x, x->state());
2137 #ifndef PRODUCT
2138 if (PrintC1Statistics) {
2139 increment_counter(Runtime1::throw_count_address(), T_INT);
2140 }
2141 #endif
2143 // check if the instruction has an xhandler in any of the nested scopes
2144 bool unwind = false;
2145 if (info->exception_handlers()->length() == 0) {
2146 // this throw is not inside an xhandler
2147 unwind = true;
2148 } else {
2149 // get some idea of the throw type
2150 bool type_is_exact = true;
2151 ciType* throw_type = x->exception()->exact_type();
2152 if (throw_type == NULL) {
2153 type_is_exact = false;
2154 throw_type = x->exception()->declared_type();
2155 }
2156 if (throw_type != NULL && throw_type->is_instance_klass()) {
2157 ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
2158 unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
2159 }
2160 }
2162 // do null check before moving exception oop into fixed register
2163 // to avoid a fixed interval with an oop during the null check.
2164 // Use a copy of the CodeEmitInfo because debug information is
2165 // different for null_check and throw.
2166 if (GenerateCompilerNullChecks &&
2167 (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
2168 // if the exception object wasn't created using new then it might be null.
2169 __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
2170 }
2172 if (compilation()->env()->jvmti_can_post_on_exceptions()) {
2173 // we need to go through the exception lookup path to get JVMTI
2174 // notification done
2175 unwind = false;
2176 }
2178 // move exception oop into fixed register
2179 __ move(exception_opr, exceptionOopOpr());
2181 if (unwind) {
2182 __ unwind_exception(exceptionOopOpr());
2183 } else {
2184 __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
2185 }
2186 }
2189 void LIRGenerator::do_RoundFP(RoundFP* x) {
2190 LIRItem input(x->input(), this);
2191 input.load_item();
2192 LIR_Opr input_opr = input.result();
2193 assert(input_opr->is_register(), "why round if value is not in a register?");
2194 assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
2195 if (input_opr->is_single_fpu()) {
2196 set_result(x, round_item(input_opr)); // This code path not currently taken
2197 } else {
2198 LIR_Opr result = new_register(T_DOUBLE);
2199 set_vreg_flag(result, must_start_in_memory);
2200 __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
2201 set_result(x, result);
2202 }
2203 }
2205 // Here UnsafeGetRaw's x->base() and x->index() may each be int or long,
2206 // on both 64-bit and 32-bit platforms. x->base() is expected to always be long on 64-bit.
2207 void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
2208 LIRItem base(x->base(), this);
2209 LIRItem idx(this);
2211 base.load_item();
2212 if (x->has_index()) {
2213 idx.set_instruction(x->index());
2214 idx.load_nonconstant();
2215 }
2217 LIR_Opr reg = rlock_result(x, x->basic_type());
2219 int log2_scale = 0;
2220 if (x->has_index()) {
2221 log2_scale = x->log2_scale();
2222 }
2224 assert(!x->has_index() || idx.value() == x->index(), "should match");
2226 LIR_Opr base_op = base.result();
2227 LIR_Opr index_op = idx.result();
2228 #ifndef _LP64
2229 if (base_op->type() == T_LONG) {
2230 base_op = new_register(T_INT);
2231 __ convert(Bytecodes::_l2i, base.result(), base_op);
2232 }
2233 if (x->has_index()) {
2234 if (index_op->type() == T_LONG) {
2235 LIR_Opr long_index_op = index_op;
2236 if (index_op->is_constant()) {
2237 long_index_op = new_register(T_LONG);
2238 __ move(index_op, long_index_op);
2239 }
2240 index_op = new_register(T_INT);
2241 __ convert(Bytecodes::_l2i, long_index_op, index_op);
2242 } else {
2243 assert(x->index()->type()->tag() == intTag, "must be");
2244 }
2245 }
2246 // At this point base and index should be all ints.
2247 assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
2248 assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
2249 #else
2250 if (x->has_index()) {
2251 if (index_op->type() == T_INT) {
2252 if (!index_op->is_constant()) {
2253 index_op = new_register(T_LONG);
2254 __ convert(Bytecodes::_i2l, idx.result(), index_op);
2255 }
2256 } else {
2257 assert(index_op->type() == T_LONG, "must be");
2258 if (index_op->is_constant()) {
2259 index_op = new_register(T_LONG);
2260 __ move(idx.result(), index_op);
2261 }
2262 }
2263 }
2264 // At this point base is a non-constant long.
2265 // Index is a long register or an int constant.
2266 // We allow the constant to stay an int because that would allow us a more compact encoding by
2267 // embedding an immediate offset in the address expression. If we have a long constant, we have to
2268 // move it into a register first.
2269 assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
2270 assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
2271 (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
2272 #endif
2274 BasicType dst_type = x->basic_type();
2276 LIR_Address* addr;
2277 if (index_op->is_constant()) {
2278 assert(log2_scale == 0, "must not have a scale");
2279 assert(index_op->type() == T_INT, "only int constants supported");
2280 addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2281 } else {
2282 #ifdef X86
2283 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2284 #elif defined(GENERATE_ADDRESS_IS_PREFERRED)
2285 addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2286 #else
2287 if (index_op->is_illegal() || log2_scale == 0) {
2288 #ifndef MIPS
2289 addr = new LIR_Address(base_op, index_op, dst_type);
2290 #else
2291 #ifdef _LP64
2292 LIR_Opr ptr = new_register(T_LONG);
2293 #else
2294 LIR_Opr ptr = new_register(T_INT);
2295 #endif
2296 __ move(base_op, ptr);
2297 if (index_op->is_valid())
2298 __ add(ptr, index_op, ptr);
2299 addr = new LIR_Address(ptr, 0, dst_type);
2300 #endif
2301 } else {
2302 LIR_Opr tmp = new_pointer_register();
2303 __ shift_left(index_op, log2_scale, tmp);
2304 addr = new LIR_Address(base_op, tmp, dst_type);
2305 }
2306 #endif
2307 }
2309 if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2310 __ unaligned_move(addr, reg);
2311 } else {
2312 if (dst_type == T_OBJECT && x->is_wide()) {
2313 __ move_wide(addr, reg);
2314 } else {
2315 __ move(addr, reg);
2316 }
2317 }
2318 }
2321 void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
2322 int log2_scale = 0;
2323 BasicType type = x->basic_type();
2325 if (x->has_index()) {
2326 log2_scale = x->log2_scale();
2327 }
2329 LIRItem base(x->base(), this);
2330 LIRItem value(x->value(), this);
2331 LIRItem idx(this);
2333 base.load_item();
2334 if (x->has_index()) {
2335 idx.set_instruction(x->index());
2336 idx.load_item();
2337 }
2339 if (type == T_BYTE || type == T_BOOLEAN) {
2340 value.load_byte_item();
2341 } else {
2342 value.load_item();
2343 }
2345 set_no_result(x);
2347 LIR_Opr base_op = base.result();
2348 LIR_Opr index_op = idx.result();
2350 #ifdef GENERATE_ADDRESS_IS_PREFERRED
2351 LIR_Address* addr = generate_address(base_op, index_op, log2_scale, 0, x->basic_type());
2352 #else
2353 #ifndef _LP64
2354 if (base_op->type() == T_LONG) {
2355 base_op = new_register(T_INT);
2356 __ convert(Bytecodes::_l2i, base.result(), base_op);
2357 }
2358 if (x->has_index()) {
2359 if (index_op->type() == T_LONG) {
2360 index_op = new_register(T_INT);
2361 __ convert(Bytecodes::_l2i, idx.result(), index_op);
2362 }
2363 }
2364 // At this point base and index should both be non-constant ints.
2365 assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
2366 assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be a non-constant int");
2367 #else
2368 if (x->has_index()) {
2369 if (index_op->type() == T_INT) {
2370 index_op = new_register(T_LONG);
2371 __ convert(Bytecodes::_i2l, idx.result(), index_op);
2372 }
2373 }
2374 // At this point base and index are long and non-constant
2375 assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
2376 assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
2377 #endif
2379 if (log2_scale != 0) {
2380 // temporary fix (platform dependent code without shift on Intel would be better)
2381 // TODO: ARM also allows embedded shift in the address
2382 LIR_Opr tmp = new_pointer_register();
2383 if (TwoOperandLIRForm) {
2384 __ move(index_op, tmp);
2385 index_op = tmp;
2386 }
2387 __ shift_left(index_op, log2_scale, tmp);
2388 if (!TwoOperandLIRForm) {
2389 index_op = tmp;
2390 }
2391 }
2393 LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2394 #endif // !GENERATE_ADDRESS_IS_PREFERRED
2395 __ move(value.result(), addr);
2396 }
2399 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2400 BasicType type = x->basic_type();
2401 LIRItem src(x->object(), this);
2402 LIRItem off(x->offset(), this);
2404 off.load_item();
2405 src.load_item();
2407 LIR_Opr value = rlock_result(x, x->basic_type());
2409 get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2411 #if INCLUDE_ALL_GCS
2412 // We might be reading the value of the referent field of a
2413 // Reference object in order to attach it back to the live
2414 // object graph. If G1 is enabled then we need to record
2415 // the value that is being returned in an SATB log buffer.
2416 //
2417 // We need to generate code similar to the following...
2418 //
2419 // if (offset == java_lang_ref_Reference::referent_offset) {
2420 // if (src != NULL) {
2421 // if (klass(src)->reference_type() != REF_NONE) {
2422 // pre_barrier(..., value, ...);
2423 // }
2424 // }
2425 // }
2427 if (UseG1GC && type == T_OBJECT) {
2428 bool gen_pre_barrier = true; // Assume we need to generate pre_barrier.
2429 bool gen_offset_check = true; // Assume we need to generate the offset guard.
2430 bool gen_source_check = true; // Assume we need to check the src object for null.
2431 bool gen_type_check = true; // Assume we need to check the reference_type.
2433 if (off.is_constant()) {
2434 jlong off_con = (off.type()->is_int() ?
2435 (jlong) off.get_jint_constant() :
2436 off.get_jlong_constant());
2439 if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2440 // The constant offset is something other than referent_offset.
2441 // We can skip generating/checking the remaining guards and
2442 // skip generation of the code stub.
2443 gen_pre_barrier = false;
2444 } else {
2445 // The constant offset is the same as referent_offset -
2446 // we do not need to generate a runtime offset check.
2447 gen_offset_check = false;
2448 }
2449 }
2451 // We don't need to generate the stub if the source object is an array
2452 if (gen_pre_barrier && src.type()->is_array()) {
2453 gen_pre_barrier = false;
2454 }
2456 if (gen_pre_barrier) {
2457 // We still need to continue with the checks.
2458 if (src.is_constant()) {
2459 ciObject* src_con = src.get_jobject_constant();
2460 guarantee(src_con != NULL, "no source constant");
2462 if (src_con->is_null_object()) {
2463 // The constant src object is null - we can skip
2464 // generating the code stub.
2465 gen_pre_barrier = false;
2466 } else {
2467 // Non-null constant source object. We still have to generate
2468 // the slow stub - but we don't need to generate the runtime
2469 // null object check.
2470 gen_source_check = false;
2471 }
2472 }
2473 }
2474 if (gen_pre_barrier && !PatchALot) {
2475 // Can the klass of the object be statically determined to be
2476 // a sub-class of Reference?
2477 ciType* type = src.value()->declared_type();
2478 if ((type != NULL) && type->is_loaded()) {
2479 if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
2480 gen_type_check = false;
2481 } else if (type->is_klass() &&
2482 !compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
2483 // Not Reference and not Object klass.
2484 gen_pre_barrier = false;
2485 }
2486 }
2487 }
2489 if (gen_pre_barrier) {
2490 LabelObj* Lcont = new LabelObj();
2492 // We may need to generate up to three runtime checks here. Let's start with
2493 // the offset check.
2494 if (gen_offset_check) {
2495 // if (offset != referent_offset) -> continue
2496 // If offset is an int then we can do the comparison with the
2497 // referent_offset constant; otherwise we need to move
2498 // referent_offset into a temporary register and generate
2499 // a reg-reg compare.
2501 LIR_Opr referent_off;
2503 if (off.type()->is_int()) {
2504 referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
2505 } else {
2506 assert(off.type()->is_long(), "what else?");
2507 referent_off = new_register(T_LONG);
2508 __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
2509 }
2510 #ifndef MIPS
2511 __ cmp(lir_cond_notEqual, off.result(), referent_off);
2512 __ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
2513 #else
2514 __ branch(lir_cond_notEqual, off.result(), referent_off, Lcont->label());
2515 #endif
2516 }
2517 if (gen_source_check) {
2518 // offset is a const and equals referent offset
2519 // if (source == null) -> continue
2520 #ifndef MIPS
2521 __ cmp(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL));
2522 __ branch(lir_cond_equal, T_OBJECT, Lcont->label());
2523 #else
2524 __ branch(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL), Lcont->label());
2525 #endif
2526 }
2527 LIR_Opr src_klass = new_register(T_OBJECT);
2528 if (gen_type_check) {
2529 // We have determined that offset == referent_offset && src != null.
2530 // if (src->_klass->_reference_type == REF_NONE) -> continue
2531 __ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
2532 LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
2533 LIR_Opr reference_type = new_register(T_INT);
2534 __ move(reference_type_addr, reference_type);
2535 #ifndef MIPS
2536 __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
2537 __ branch(lir_cond_equal, T_INT, Lcont->label());
2538 #else
2539 __ branch(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE), Lcont->label());
2540 #endif
2541 }
2542 {
2543 // We have determined that src->_klass->_reference_type != REF_NONE
2544 // so register the value in the referent field with the pre-barrier.
2545 pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
2546 value /* pre_val */,
2547 false /* do_load */,
2548 false /* patch */,
2549 NULL /* info */);
2550 }
2551 __ branch_destination(Lcont->label());
2552 }
2553 }
2554 #endif // INCLUDE_ALL_GCS
2556 if (x->is_volatile() && os::is_MP()) __ membar_acquire();
2557 }
2560 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2561 BasicType type = x->basic_type();
2562 LIRItem src(x->object(), this);
2563 LIRItem off(x->offset(), this);
2564 LIRItem data(x->value(), this);
2566 src.load_item();
2567 if (type == T_BOOLEAN || type == T_BYTE) {
2568 data.load_byte_item();
2569 } else {
2570 data.load_item();
2571 }
2572 off.load_item();
2574 set_no_result(x);
2576 if (x->is_volatile() && os::is_MP()) __ membar_release();
2577 put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
2578 if (x->is_volatile() && os::is_MP()) __ membar();
2579 }
2582 void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
2583 LIRItem src(x->object(), this);
2584 LIRItem off(x->offset(), this);
2586 src.load_item();
2587 if (off.is_constant() && can_inline_as_constant(x->offset())) {
2588 // let it be a constant
2589 off.dont_load_item();
2590 } else {
2591 off.load_item();
2592 }
2594 set_no_result(x);
2596 LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
2597 __ prefetch(addr, is_store);
2598 }
2601 void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
2602 do_UnsafePrefetch(x, false);
2603 }
2606 void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
2607 do_UnsafePrefetch(x, true);
2608 }
2611 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2612 int lng = x->length();
2614 for (int i = 0; i < lng; i++) {
2615 SwitchRange* one_range = x->at(i);
2616 int low_key = one_range->low_key();
2617 int high_key = one_range->high_key();
2618 BlockBegin* dest = one_range->sux();
2619 if (low_key == high_key) {
2620 #ifndef MIPS
2621 __ cmp(lir_cond_equal, value, low_key);
2622 __ branch(lir_cond_equal, T_INT, dest);
2623 #else
2624 __ branch(lir_cond_equal, value, LIR_OprFact::intConst(low_key), T_INT, dest);
2625 #endif
2626 } else if (high_key - low_key == 1) {
2627 #ifndef MIPS
2628 __ cmp(lir_cond_equal, value, low_key);
2629 __ branch(lir_cond_equal, T_INT, dest);
2630 __ cmp(lir_cond_equal, value, high_key);
2631 __ branch(lir_cond_equal, T_INT, dest);
2632 #else
2633 __ branch(lir_cond_equal, value, LIR_OprFact::intConst(low_key), T_INT, dest);
2634 __ branch(lir_cond_equal, value, LIR_OprFact::intConst(high_key), T_INT, dest);
2636 #endif
2637 } else {
2638 LabelObj* L = new LabelObj();
2639 #ifndef MIPS
2640 __ cmp(lir_cond_less, value, low_key);
2641 __ branch(lir_cond_less, T_INT, L->label());
2642 __ cmp(lir_cond_lessEqual, value, high_key);
2643 __ branch(lir_cond_lessEqual, T_INT, dest);
2644 __ branch_destination(L->label());
2645 #else
2646 __ branch(lir_cond_less, value, LIR_OprFact::intConst(low_key), L->label());
2647 __ branch(lir_cond_lessEqual, value, LIR_OprFact::intConst(high_key), T_INT, dest);
2648 __ branch_destination(L->label());
2649 #endif
2650 }
2651 }
2652 __ jump(default_sux);
2653 }
2656 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2657 SwitchRangeList* res = new SwitchRangeList();
2658 int len = x->length();
2659 if (len > 0) {
2660 BlockBegin* sux = x->sux_at(0);
2661 int key = x->lo_key();
2662 BlockBegin* default_sux = x->default_sux();
2663 SwitchRange* range = new SwitchRange(key, sux);
2664 for (int i = 0; i < len; i++, key++) {
2665 BlockBegin* new_sux = x->sux_at(i);
2666 if (sux == new_sux) {
2667 // still in same range
2668 range->set_high_key(key);
2669 } else {
2670 // skip tests which explicitly dispatch to the default
2671 if (sux != default_sux) {
2672 res->append(range);
2673 }
2674 range = new SwitchRange(key, new_sux);
2675 }
2676 sux = new_sux;
2677 }
2678 if (res->length() == 0 || res->last() != range) res->append(range);
2679 }
2680 return res;
2681 }
2684 // we expect the keys to be sorted by increasing value
2685 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2686 SwitchRangeList* res = new SwitchRangeList();
2687 int len = x->length();
2688 if (len > 0) {
2689 BlockBegin* default_sux = x->default_sux();
2690 int key = x->key_at(0);
2691 BlockBegin* sux = x->sux_at(0);
2692 SwitchRange* range = new SwitchRange(key, sux);
2693 for (int i = 1; i < len; i++) {
2694 int new_key = x->key_at(i);
2695 BlockBegin* new_sux = x->sux_at(i);
2696 if (key+1 == new_key && sux == new_sux) {
2697 // still in same range
2698 range->set_high_key(new_key);
2699 } else {
2700 // skip tests which explicitly dispatch to the default
2701 if (range->sux() != default_sux) {
2702 res->append(range);
2703 }
2704 range = new SwitchRange(new_key, new_sux);
2705 }
2706 key = new_key;
2707 sux = new_sux;
2708 }
2709 if (res->length() == 0 || res->last() != range) res->append(range);
2710 }
2711 return res;
2712 }
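// Example (hypothetical keys): a lookupswitch with keys {1, 2, 3, 10}
// dispatching to {B1, B1, B1, B2} collapses into two ranges,
// [1,3] -> B1 and [10,10] -> B2; interior ranges whose target is the
// default successor are dropped along the way.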
2715 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2716 LIRItem tag(x->tag(), this);
2717 tag.load_item();
2718 set_no_result(x);
2720 if (x->is_safepoint()) {
2721 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2722 }
2724 // move values into phi locations
2725 move_to_phi(x->state());
2727 int lo_key = x->lo_key();
2728 int hi_key = x->hi_key();
2729 int len = x->length();
2730 LIR_Opr value = tag.result();
2731 if (UseTableRanges) {
2732 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2733 } else {
2734 for (int i = 0; i < len; i++) {
2735 #ifndef MIPS
2736 __ cmp(lir_cond_equal, value, i + lo_key);
2737 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2738 #else
2739 __ branch(lir_cond_equal, value, LIR_OprFact::intConst(i+lo_key), T_INT, x->sux_at(i));
2740 #endif
2741 }
2742 __ jump(x->default_sux());
2743 }
2744 }
2747 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2748 LIRItem tag(x->tag(), this);
2749 tag.load_item();
2750 set_no_result(x);
2752 if (x->is_safepoint()) {
2753 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2754 }
2756 // move values into phi locations
2757 move_to_phi(x->state());
2759 LIR_Opr value = tag.result();
2760 if (UseTableRanges) {
2761 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2762 } else {
2763 int len = x->length();
2764 for (int i = 0; i < len; i++) {
2765 #ifndef MIPS
2766 __ cmp(lir_cond_equal, value, x->key_at(i));
2767 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2768 #else
2769 __ branch(lir_cond_equal, value, LIR_OprFact::intConst(x->key_at(i)), T_INT, x->sux_at(i));
2770 #endif
2771 }
2772 __ jump(x->default_sux());
2773 }
2774 }
2777 void LIRGenerator::do_Goto(Goto* x) {
2778 set_no_result(x);
2780 if (block()->next()->as_OsrEntry()) {
2781 // need to free up storage used for OSR entry point
2782 LIR_Opr osrBuffer = block()->next()->operand();
2783 BasicTypeList signature;
2784 signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
2785 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2786 __ move(osrBuffer, cc->args()->at(0));
2787 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2788 getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2789 }
2791 if (x->is_safepoint()) {
2792 ValueStack* state = x->state_before() ? x->state_before() : x->state();
2794 // increment backedge counter if needed
2795 CodeEmitInfo* info = state_for(x, state);
2796 increment_backedge_counter(info, x->profiled_bci());
2797 CodeEmitInfo* safepoint_info = state_for(x, state);
2798 __ safepoint(safepoint_poll_register(), safepoint_info);
2799 }
2801 // Gotos can be folded Ifs; handle profiling for this case.
2802 if (x->should_profile()) {
2803 ciMethod* method = x->profiled_method();
2804 assert(method != NULL, "method should be set if branch is profiled");
2805 ciMethodData* md = method->method_data_or_null();
2806 assert(md != NULL, "Sanity");
2807 ciProfileData* data = md->bci_to_data(x->profiled_bci());
2808 assert(data != NULL, "must have profiling data");
2809 int offset;
2810 if (x->direction() == Goto::taken) {
2811 assert(data->is_BranchData(), "need BranchData for two-way branches");
2812 offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2813 } else if (x->direction() == Goto::not_taken) {
2814 assert(data->is_BranchData(), "need BranchData for two-way branches");
2815 offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2816 } else {
2817 assert(data->is_JumpData(), "need JumpData for branches");
2818 offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2819 }
2820 LIR_Opr md_reg = new_register(T_METADATA);
2821 __ metadata2reg(md->constant_encoding(), md_reg);
2822 increment_counter(new LIR_Address(md_reg, offset,
2823 NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2824 }
2826 // emit phi-instruction move after safepoint since this simplifies
2827 // describing the state at the safepoint.
2828 move_to_phi(x->state());
2830 __ jump(x->default_sux());
2831 }
2833 /**
2834 * Emit profiling code if needed for arguments, parameters, return value types
2835 *
2836 * @param md MDO the code will update at runtime
2837 * @param md_base_offset common offset in the MDO for this profile and subsequent ones
2838 * @param md_offset offset in the MDO (on top of md_base_offset) for this profile
2839 * @param profiled_k current profile
2840 * @param obj IR node for the object to be profiled
2841 * @param mdp register to hold the pointer inside the MDO (md + md_base_offset).
2842 * Set once we find an update to make and use for next ones.
2843 * @param not_null true if we know obj cannot be null
2844 * @param signature_at_call_k signature at call for obj
2845 * @param callee_signature_k signature of callee for obj; call-site and callee
2846 * signatures differ at method handle call sites
2847 * @return the only klass we know will ever be seen at this profile point
2848 */
2849 ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
2850 Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
2851 ciKlass* callee_signature_k) {
2852 ciKlass* result = NULL;
2853 bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
2854 bool do_update = !TypeEntries::is_type_unknown(profiled_k);
2855 // Known non-null (or null already seen) and the type already set to
2856 // unknown: nothing we can do to improve profiling
2857 if (!do_null && !do_update) {
2858 return result;
2859 }
2861 ciKlass* exact_klass = NULL;
2862 Compilation* comp = Compilation::current();
2863 if (do_update) {
2864 // try to find exact type, using CHA if possible, so that loading
2865 // the klass from the object can be avoided
2866 ciType* type = obj->exact_type();
2867 if (type == NULL) {
2868 type = obj->declared_type();
2869 type = comp->cha_exact_type(type);
2870 }
2871 assert(type == NULL || type->is_klass(), "type should be class");
2872 exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
2874 do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2875 }
2877 if (!do_null && !do_update) {
2878 return result;
2879 }
2881 ciKlass* exact_signature_k = NULL;
2882 if (do_update) {
2883 // Is the type from the signature exact (the only one possible)?
2884 exact_signature_k = signature_at_call_k->exact_klass();
2885 if (exact_signature_k == NULL) {
2886 exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2887 } else {
2888 result = exact_signature_k;
2889 // Known statically. No need to emit any code: prevent
2890 // LIR_Assembler::emit_profile_type() from emitting useless code
2891 profiled_k = ciTypeEntries::with_status(result, profiled_k);
2892 }
2893 // exact_klass and exact_signature_k can both be non-NULL but
2894 // different if exact_klass is loaded after the ciObject for
2895 // exact_signature_k is created.
2896 if (exact_klass == NULL && exact_signature_k != NULL && exact_klass != exact_signature_k) {
2897 // sometimes the type of the signature is better than the best type
2898 // the compiler has
2899 exact_klass = exact_signature_k;
2900 }
2901 if (callee_signature_k != NULL &&
2902 callee_signature_k != signature_at_call_k) {
2903 ciKlass* improved_klass = callee_signature_k->exact_klass();
2904 if (improved_klass == NULL) {
2905 improved_klass = comp->cha_exact_type(callee_signature_k);
2906 }
2907 if (exact_klass == NULL && improved_klass != NULL && exact_klass != improved_klass) {
2908 exact_klass = exact_signature_k;
2909 }
2910 }
2911 do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2912 }
2914 if (!do_null && !do_update) {
2915 return result;
2916 }
2918 if (mdp == LIR_OprFact::illegalOpr) {
2919 mdp = new_register(T_METADATA);
2920 __ metadata2reg(md->constant_encoding(), mdp);
2921 if (md_base_offset != 0) {
2922 LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
2923 mdp = new_pointer_register();
2924 __ leal(LIR_OprFact::address(base_type_address), mdp);
2925 }
2926 }
2927 LIRItem value(obj, this);
2928 value.load_item();
2929 __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
2930 value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
2931 return result;
2932 }
2934 // profile parameters on entry to the root of the compilation
2935 void LIRGenerator::profile_parameters(Base* x) {
2936 if (compilation()->profile_parameters()) {
2937 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2938 ciMethodData* md = scope()->method()->method_data_or_null();
2939 assert(md != NULL, "Sanity");
2941 if (md->parameters_type_data() != NULL) {
2942 ciParametersTypeData* parameters_type_data = md->parameters_type_data();
2943 ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
2944 LIR_Opr mdp = LIR_OprFact::illegalOpr;
2945 for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
2946 LIR_Opr src = args->at(i);
2947 assert(!src->is_illegal(), "check");
2948 BasicType t = src->type();
2949 if (t == T_OBJECT || t == T_ARRAY) {
2950 intptr_t profiled_k = parameters->type(j);
2951 Local* local = x->state()->local_at(java_index)->as_Local();
2952 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2953 in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2954 profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL);
2955 // If the profile is known statically, set it once and for all and do not emit any code
2956 if (exact != NULL) {
2957 md->set_parameter_type(j, exact);
2958 }
2959 j++;
2960 }
2961 java_index += type2size[t];
2962 }
2963 }
2964 }
2965 }
2967 void LIRGenerator::do_Base(Base* x) {
2968 __ std_entry(LIR_OprFact::illegalOpr);
2969 // Emit moves from physical registers / stack slots to virtual registers
2970 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2971 IRScope* irScope = compilation()->hir()->top_scope();
2972 int java_index = 0;
2973 for (int i = 0; i < args->length(); i++) {
2974 LIR_Opr src = args->at(i);
2975 assert(!src->is_illegal(), "check");
2976 BasicType t = src->type();
2978 // Types which are smaller than int are passed as int, so
2979 // correct the type that is passed.
2980 switch (t) {
2981 case T_BYTE:
2982 case T_BOOLEAN:
2983 case T_SHORT:
2984 case T_CHAR:
2985 t = T_INT;
2986 break;
2987 }
2989 LIR_Opr dest = new_register(t);
2990 __ move(src, dest);
2992 // Assign new location to Local instruction for this local
2993 Local* local = x->state()->local_at(java_index)->as_Local();
2994 assert(local != NULL, "Locals for incoming arguments must have been created");
2995 #ifndef __SOFTFP__
2996 // The java calling convention passes double as long and float as int.
2997 assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2998 #endif // __SOFTFP__
2999 local->set_operand(dest);
3000 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
3001 java_index += type2size[t];
3002 }
3004 if (compilation()->env()->dtrace_method_probes()) {
3005 BasicTypeList signature;
3006 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
3007 signature.append(T_METADATA); // Method*
3008 LIR_OprList* args = new LIR_OprList();
3009 args->append(getThreadPointer());
3010 LIR_Opr meth = new_register(T_METADATA);
3011 __ metadata2reg(method()->constant_encoding(), meth);
3012 args->append(meth);
3013 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
3014 }
3016 if (method()->is_synchronized()) {
3017 LIR_Opr obj;
3018 if (method()->is_static()) {
3019 obj = new_register(T_OBJECT);
3020 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
3021 } else {
3022 Local* receiver = x->state()->local_at(0)->as_Local();
3023 assert(receiver != NULL, "must already exist");
3024 obj = receiver->operand();
3025 }
3026 assert(obj->is_valid(), "must be valid");
3028 if (method()->is_synchronized() && GenerateSynchronizationCode) {
3029 LIR_Opr lock = new_register(T_INT);
3030 __ load_stack_address_monitor(0, lock);
3032 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
3033 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
3035 // receiver is guaranteed non-NULL so don't need CodeEmitInfo
3036 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
3037 }
3038 }
3040 // increment invocation counters if needed
3041 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
3042 profile_parameters(x);
3043 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
3044 increment_invocation_counter(info);
3045 }
3047 // all blocks with a successor must end with an unconditional jump
3048 // to the successor even if they are consecutive
3049 __ jump(x->default_sux());
3050 }
3053 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
3054 // construct our frame and model the production of the incoming pointer
3055 // to the OSR buffer.
3056 __ osr_entry(LIR_Assembler::osrBufferPointer());
3057 LIR_Opr result = rlock_result(x);
3058 __ move(LIR_Assembler::osrBufferPointer(), result);
3059 }
3062 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
3063 assert(args->length() == arg_list->length(),
3064 err_msg_res("args=%d, arg_list=%d", args->length(), arg_list->length()));
3065 for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
3066 LIRItem* param = args->at(i);
3067 LIR_Opr loc = arg_list->at(i);
3068 if (loc->is_register()) {
3069 param->load_item_force(loc);
3070 } else {
3071 LIR_Address* addr = loc->as_address_ptr();
3072 param->load_for_store(addr->type());
3073 if (addr->type() == T_OBJECT) {
3074 __ move_wide(param->result(), addr);
3075 } else if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3077 __ unaligned_move(param->result(), addr);
3078 } else {
3079 __ move(param->result(), addr);
3080 }
3081 }
3082 }
3084 if (x->has_receiver()) {
3085 LIRItem* receiver = args->at(0);
3086 LIR_Opr loc = arg_list->at(0);
3087 if (loc->is_register()) {
3088 receiver->load_item_force(loc);
3089 } else {
3090 assert(loc->is_address(), "just checking");
3091 receiver->load_for_store(T_OBJECT);
3092 __ move_wide(receiver->result(), loc->as_address_ptr());
3093 }
3094 }
3095 }
3098 // Visits all arguments, returns appropriate items without loading them
3099 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
3100 LIRItemList* argument_items = new LIRItemList();
3101 if (x->has_receiver()) {
3102 LIRItem* receiver = new LIRItem(x->receiver(), this);
3103 argument_items->append(receiver);
3104 }
3105 for (int i = 0; i < x->number_of_arguments(); i++) {
3106 LIRItem* param = new LIRItem(x->argument_at(i), this);
3107 argument_items->append(param);
3108 }
3109 return argument_items;
3110 }
3113 // The invoke with receiver has the following phases:
3114 // a) traverse and load/lock receiver;
3115 // b) traverse all arguments -> item-array (invoke_visit_argument)
3116 // c) push receiver on stack
3117 // d) load each of the items and push on stack
3118 // e) unlock receiver
3119 // f) move receiver into receiver-register %o0
3120 // g) lock result registers and emit call operation
3121 //
3122 // Before issuing a call, we must spill-save all values on stack
3123 // that are in caller-save registers. "spill-save" moves those values
3124 // either into a free callee-save register or spills them if no free
3125 // callee-save register is available.
3126 //
3127 // The problem is where to invoke spill-save.
3128 // - if invoked between e) and f), we may lock a callee-save
3129 // register in "spill-save" that destroys the receiver register
3130 // before f) is executed
3131 // - if we rearrange f) to be earlier (by loading %o0) it
3132 // may destroy a value on the stack that is currently in %o0
3133 // and is waiting to be spilled
3134 // - if we keep the receiver locked while doing spill-save,
3135 // we cannot spill it as it is spill-locked
3136 //
3137 void LIRGenerator::do_Invoke(Invoke* x) {
3138 CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
3140 LIR_OprList* arg_list = cc->args();
3141 LIRItemList* args = invoke_visit_arguments(x);
3142 LIR_Opr receiver = LIR_OprFact::illegalOpr;
3144 // setup result register
3145 LIR_Opr result_register = LIR_OprFact::illegalOpr;
3146 if (x->type() != voidType) {
3147 result_register = result_register_for(x->type());
3148 }
3150 CodeEmitInfo* info = state_for(x, x->state());
3152 invoke_load_arguments(x, args, arg_list);
3154 if (x->has_receiver()) {
3155 args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
3156 receiver = args->at(0)->result();
3157 }
3159 // emit invoke code
3160 bool optimized = x->target_is_loaded() && x->target_is_final();
3161 assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
3163 // JSR 292
3164 // Preserve the SP over MethodHandle call sites, if needed.
3165 ciMethod* target = x->target();
3166 bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
3167 target->is_method_handle_intrinsic() ||
3168 target->is_compiled_lambda_form());
3169 if (is_method_handle_invoke) {
3170 info->set_is_method_handle_invoke(true);
3171 if (FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
3172 __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
3173 }
3174 }
3176 switch (x->code()) {
3177 case Bytecodes::_invokestatic:
3178 __ call_static(target, result_register,
3179 SharedRuntime::get_resolve_static_call_stub(),
3180 arg_list, info);
3181 break;
3182 case Bytecodes::_invokespecial:
3183 case Bytecodes::_invokevirtual:
3184 case Bytecodes::_invokeinterface:
3185 // for final target we still produce an inline cache, in order
3186 // to be able to call mixed mode
3187 if (x->code() == Bytecodes::_invokespecial || optimized) {
3188 __ call_opt_virtual(target, receiver, result_register,
3189 SharedRuntime::get_resolve_opt_virtual_call_stub(),
3190 arg_list, info);
3191 } else if (x->vtable_index() < 0) {
3192 __ call_icvirtual(target, receiver, result_register,
3193 SharedRuntime::get_resolve_virtual_call_stub(),
3194 arg_list, info);
3195 } else {
3196 int entry_offset = InstanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
3197 int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
3198 __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
3199 }
3200 break;
3201 case Bytecodes::_invokedynamic: {
3202 __ call_dynamic(target, receiver, result_register,
3203 SharedRuntime::get_resolve_static_call_stub(),
3204 arg_list, info);
3205 break;
3206 }
3207 default:
3208 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
3209 break;
3210 }
3212 // JSR 292
3213 // Restore the SP after MethodHandle call sites, if needed.
3214 if (is_method_handle_invoke
3215 && FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
3216 __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
3217 }
3219 if (x->type()->is_float() || x->type()->is_double()) {
3220 // Force rounding of results from non-strictfp when in strictfp
3221 // scope (or when we don't know the strictness of the callee,
3222 // to be safe).
3223 if (method()->is_strict()) {
3224 if (!x->target_is_loaded() || !x->target_is_strictfp()) {
3225 result_register = round_item(result_register);
3226 }
3227 }
3228 }
3230 if (result_register->is_valid()) {
3231 LIR_Opr result = rlock_result(x);
3232 __ move(result_register, result);
3233 }
3234 }
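// The bit-cast intrinsics (intBitsToFloat, floatToRawIntBits, etc.) are
// implemented by forcing the value into a spill slot and reloading it
// with the result type, reinterpreting the raw bits through memory.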
3237 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
3238 assert(x->number_of_arguments() == 1, "wrong type");
3239 LIRItem value (x->argument_at(0), this);
3240 LIR_Opr reg = rlock_result(x);
3241 value.load_item();
3242 LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
3243 __ move(tmp, reg);
3244 }
3248 // Code for: x->x() {x->cond()} x->y() ? x->tval() : x->fval()
3249 void LIRGenerator::do_IfOp(IfOp* x) {
3250 #ifdef ASSERT
3251 {
3252 ValueTag xtag = x->x()->type()->tag();
3253 ValueTag ttag = x->tval()->type()->tag();
3254 assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3255 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3256 assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3257 }
3258 #endif
3260 LIRItem left(x->x(), this);
3261 LIRItem right(x->y(), this);
3262 left.load_item();
3263 if (can_inline_as_constant(right.value())) {
3264 right.dont_load_item();
3265 } else {
3266 right.load_item();
3267 }
3269 LIRItem t_val(x->tval(), this);
3270 LIRItem f_val(x->fval(), this);
3271 t_val.dont_load_item();
3272 f_val.dont_load_item();
3273 LIR_Opr reg = rlock_result(x);
3275 #ifndef MIPS
3276 __ cmp(lir_cond(x->cond()), left.result(), right.result());
3277 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3278 #else
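// No conditional move on this MIPS port: materialize the "true" value,
// then branch over the "false" move when the condition holds.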
3279 LIR_Opr opr1 = t_val.result();
3280 LIR_Opr opr2 = f_val.result();
3281 LabelObj* skip = new LabelObj();
3282 __ move(opr1, reg);
3283 __ branch(lir_cond(x->cond()), left.result(), right.result(), skip->label());
3284 __ move(opr2, reg);
3285 __ branch_destination(skip->label());
3286 #endif
3287 }
3289 void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
3290 assert(x->number_of_arguments() == expected_arguments, "wrong type");
3291 LIR_Opr reg = result_register_for(x->type());
3292 __ call_runtime_leaf(routine, getThreadTemp(),
3293 reg, new LIR_OprList());
3294 LIR_Opr result = rlock_result(x);
3295 __ move(reg, result);
3296 }
3298 #ifdef TRACE_HAVE_INTRINSICS
3299 void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
3300 LIR_Opr thread = getThreadPointer();
3301 LIR_Opr osthread = new_pointer_register();
3302 __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
3303 size_t thread_id_size = OSThread::thread_id_size();
3304 if (thread_id_size == (size_t) BytesPerLong) {
3305 LIR_Opr id = new_register(T_LONG);
3306 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id);
3307 __ convert(Bytecodes::_l2i, id, rlock_result(x));
3308 } else if (thread_id_size == (size_t) BytesPerInt) {
3309 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x));
3310 } else {
3311 ShouldNotReachHere();
3312 }
3313 }
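// Reads the trace id from the argument's Klass*, sets bit 0 and writes
// the tagged value back (the low bits serve as tracing-internal flags),
// then masks off the low two bits before returning the id.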
3315 void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
3316 CodeEmitInfo* info = state_for(x);
3317 CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
3318 BasicType klass_pointer_type = NOT_LP64(T_INT) LP64_ONLY(T_LONG);
3319 assert(info != NULL, "must have info");
3320 LIRItem arg(x->argument_at(1), this);
3321 arg.load_item();
3322 LIR_Opr klass = new_pointer_register();
3323 __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), klass_pointer_type), klass, info);
3324 LIR_Opr id = new_register(T_LONG);
3325 ByteSize offset = TRACE_ID_OFFSET;
3326 LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
3327 __ move(trace_id_addr, id);
3328 __ logical_or(id, LIR_OprFact::longConst(0x01L), id);
3329 __ store(id, trace_id_addr);
3330 __ logical_and(id, LIR_OprFact::longConst(~0x3L), id);
3331 __ move(id, rlock_result(x));
3332 }
3333 #endif
3335 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
3336 switch (x->id()) {
3337 case vmIntrinsics::_intBitsToFloat :
3338 case vmIntrinsics::_doubleToRawLongBits :
3339 case vmIntrinsics::_longBitsToDouble :
3340 case vmIntrinsics::_floatToRawIntBits : {
3341 do_FPIntrinsics(x);
3342 break;
3343 }
3345 #ifdef TRACE_HAVE_INTRINSICS
3346 case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break;
3347 case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break;
3348 case vmIntrinsics::_counterTime:
3349 do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), 0, x);
3350 break;
3351 #endif
3353 case vmIntrinsics::_currentTimeMillis:
3354 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), 0, x);
3355 break;
3357 case vmIntrinsics::_nanoTime:
3358 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), 0, x);
3359 break;
3361 case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break;
3362 case vmIntrinsics::_isInstance: do_isInstance(x); break;
3363 case vmIntrinsics::_getClass: do_getClass(x); break;
3364 case vmIntrinsics::_currentThread: do_currentThread(x); break;
3366 case vmIntrinsics::_dlog: // fall through
3367 case vmIntrinsics::_dlog10: // fall through
3368 case vmIntrinsics::_dabs: // fall through
3369 case vmIntrinsics::_dsqrt: // fall through
3370 case vmIntrinsics::_dtan: // fall through
3371 case vmIntrinsics::_dsin : // fall through
3372 case vmIntrinsics::_dcos : // fall through
3373 case vmIntrinsics::_dexp : // fall through
3374 case vmIntrinsics::_dpow : do_MathIntrinsic(x); break;
3375 case vmIntrinsics::_arraycopy: do_ArrayCopy(x); break;
3377 // java.nio.Buffer.checkIndex
3378 case vmIntrinsics::_checkIndex: do_NIOCheckIndex(x); break;
3380 case vmIntrinsics::_compareAndSwapObject:
3381 do_CompareAndSwap(x, objectType);
3382 break;
3383 case vmIntrinsics::_compareAndSwapInt:
3384 do_CompareAndSwap(x, intType);
3385 break;
3386 case vmIntrinsics::_compareAndSwapLong:
3387 do_CompareAndSwap(x, longType);
3388 break;
3390 case vmIntrinsics::_loadFence :
3391 if (os::is_MP()) __ membar_acquire();
3392 break;
3393 case vmIntrinsics::_storeFence:
3394 if (os::is_MP()) __ membar_release();
3395 break;
3396 case vmIntrinsics::_fullFence :
3397 if (os::is_MP()) __ membar();
3398 break;
3400 case vmIntrinsics::_Reference_get:
3401 do_Reference_get(x);
3402 break;
3404 case vmIntrinsics::_updateCRC32:
3405 case vmIntrinsics::_updateBytesCRC32:
3406 case vmIntrinsics::_updateByteBufferCRC32:
3407 do_update_CRC32(x);
3408 break;
3410 default: ShouldNotReachHere(); break;
3411 }
3412 }
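// Argument type profiling: record the observed dynamic type of each
// profiled argument in the MethodData's CallTypeData/VirtualCallTypeData
// row. profile_type() either emits LIR to update the profile or, when the
// type is statically known, returns the exact klass so it can be stored
// directly into the MDO without emitting any code.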
3414 void LIRGenerator::profile_arguments(ProfileCall* x) {
3415 if (compilation()->profile_arguments()) {
3416 int bci = x->bci_of_invoke();
3417 ciMethodData* md = x->method()->method_data_or_null();
3418 ciProfileData* data = md->bci_to_data(bci);
3419 if (data != NULL) {
3420 if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
3421 (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
3422 ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
3423 int base_offset = md->byte_offset_of_slot(data, extra);
3424 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3425 ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
3427 Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3428 int start = 0;
3429 int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
3430 if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
3431 // first argument is not profiled at call (method handle invoke)
3432 assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
3433 start = 1;
3434 }
3435 ciSignature* callee_signature = x->callee()->signature();
3436 // method handle call to virtual method
3437 bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
3438 ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL);
3440 bool ignored_will_link;
3441 ciSignature* signature_at_call = NULL;
3442 x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3443 ciSignatureStream signature_at_call_stream(signature_at_call);
3445 // if called through method handle invoke, some arguments may have been popped
3446 for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
3447 int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
3448 ciKlass* exact = profile_type(md, base_offset, off,
3449 args->type(i), x->profiled_arg_at(i+start), mdp,
3450 !x->arg_needs_null_check(i+start),
3451 signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
3452 if (exact != NULL) {
3453 md->set_argument_type(bci, i, exact);
3454 }
3455 }
3456 } else {
3457 #ifdef ASSERT
3458 Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
3459 int n = x->nb_profiled_args();
3460 assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
3461 (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
3462 "only at JSR292 bytecodes");
3463 #endif
3464 }
3465 }
3466 }
3467 }
3469 // profile parameters on entry to an inlined method
3470 void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
3471 if (compilation()->profile_parameters() && x->inlined()) {
3472 ciMethodData* md = x->callee()->method_data_or_null();
3473 if (md != NULL) {
3474 ciParametersTypeData* parameters_type_data = md->parameters_type_data();
3475 if (parameters_type_data != NULL) {
3476 ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
3477 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3478 bool has_receiver = !x->callee()->is_static();
3479 ciSignature* sig = x->callee()->signature();
3480 ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
3481 int i = 0; // to iterate on the Instructions
3482 Value arg = x->recv();
3483 bool not_null = false;
3484 int bci = x->bci_of_invoke();
3485 Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3486 // The first parameter is the receiver so that's what we start
3487 // with if it exists. One exception is method handle call to
3488 // virtual method: the receiver is in the args list
3489 if (arg == NULL || !Bytecodes::has_receiver(bc)) {
3490 i = 1;
3491 arg = x->profiled_arg_at(0);
3492 not_null = !x->arg_needs_null_check(0);
3493 }
3494 int k = 0; // to iterate on the profile data
3495 for (;;) {
3496 intptr_t profiled_k = parameters->type(k);
3497 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
3498 in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
3499 profiled_k, arg, mdp, not_null, sig_stream.next_klass(), NULL);
3500 // If the profile is known statically, set it once and for all and do not emit any code
3501 if (exact != NULL) {
3502 md->set_parameter_type(k, exact);
3503 }
3504 k++;
3505 if (k >= parameters_type_data->number_of_parameters()) {
3506 #ifdef ASSERT
3507 int extra = 0;
3508 if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
3509 x->nb_profiled_args() >= TypeProfileParmsLimit &&
3510 x->recv() != NULL && Bytecodes::has_receiver(bc)) {
3511 extra += 1;
3512 }
3513 assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
3514 #endif
3515 break;
3516 }
3517 arg = x->profiled_arg_at(i);
3518 not_null = !x->arg_needs_null_check(i);
3519 i++;
3520 }
3521 }
3522 }
3523 }
3524 }
3526 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
3527 // Need recv in a temporary register so it interferes with the other temporaries
3528 LIR_Opr recv = LIR_OprFact::illegalOpr;
3529 LIR_Opr mdo = new_register(T_OBJECT);
3530 // tmp is used to hold the counters on SPARC
3531 LIR_Opr tmp = new_pointer_register();
3533 if (x->nb_profiled_args() > 0) {
3534 profile_arguments(x);
3535 }
3537 // profile parameters on inlined method entry including receiver
3538 if (x->recv() != NULL || x->nb_profiled_args() > 0) {
3539 profile_parameters_at_call(x);
3540 }
3542 if (x->recv() != NULL) {
3543 LIRItem value(x->recv(), this);
3544 value.load_item();
3545 recv = new_register(T_OBJECT);
3546 __ move(value.result(), recv);
3547 }
3548 __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3549 }
3551 void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3552 int bci = x->bci_of_invoke();
3553 ciMethodData* md = x->method()->method_data_or_null();
3554 ciProfileData* data = md->bci_to_data(bci);
3555 if (data != NULL) {
3556 assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3557 ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3558 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3560 bool ignored_will_link;
3561 ciSignature* signature_at_call = NULL;
3562 x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3564 // The offset within the MDO of the entry to update may be too large
3565 // to be used in load/store instructions on some platforms. So have
3566 // profile_type() compute the address of the profile in a register.
3567 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3568 ret->type(), x->ret(), mdp,
3569 !x->needs_null_check(),
3570 signature_at_call->return_type()->as_klass(),
3571 x->callee()->signature()->return_type()->as_klass());
3572 if (exact != NULL) {
3573 md->set_return_type(bci, exact);
3574 }
3575 }
3576 }
3578 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3579 // We can safely ignore accessors here, since c2 will inline them anyway;
3580 // accessors are also always mature.
3581 if (!x->inlinee()->is_accessor()) {
3582 CodeEmitInfo* info = state_for(x, x->state(), true);
3583 // Notify the runtime very infrequently only to take care of counter overflows
3584 increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true);
3585 }
3586 }
3588 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
3589 int freq_log = 0;
3590 int level = compilation()->env()->comp_level();
3591 if (level == CompLevel_limited_profile) {
3592 freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3593 } else if (level == CompLevel_full_profile) {
3594 freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3595 } else {
3596 ShouldNotReachHere();
3597 }
3598 // Increment the appropriate invocation/backedge counter and notify the runtime.
3599 increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
3600 }
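// The counter update below is: load counter, add count_increment, store.
// When notify is set, the new value is masked with
// (frequency << count_shift); since frequency is 2^n - 1, the masked
// value becomes zero once every (frequency + 1) increments, and the
// CounterOverflowStub then notifies the runtime.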
3602 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3603 ciMethod *method, int frequency,
3604 int bci, bool backedge, bool notify) {
3605 assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^n - 1 or 0");
3606 int level = _compilation->env()->comp_level();
3607 assert(level > CompLevel_simple, "Shouldn't be here");
3609 int offset = -1;
3610 LIR_Opr counter_holder = NULL;
3611 if (level == CompLevel_limited_profile) {
3612 MethodCounters* counters_adr = method->ensure_method_counters();
3613 if (counters_adr == NULL) {
3614 bailout("method counters allocation failed");
3615 return;
3616 }
3617 counter_holder = new_pointer_register();
3618 __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
3619 offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
3620 MethodCounters::invocation_counter_offset());
3621 } else if (level == CompLevel_full_profile) {
3622 counter_holder = new_register(T_METADATA);
3623 offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
3624 MethodData::invocation_counter_offset());
3625 ciMethodData* md = method->method_data_or_null();
3626 assert(md != NULL, "Sanity");
3627 __ metadata2reg(md->constant_encoding(), counter_holder);
3628 } else {
3629 ShouldNotReachHere();
3630 }
3631 LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3632 LIR_Opr result = new_register(T_INT);
3633 __ load(counter, result);
3634 __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
3635 __ store(result, counter);
3636 if (notify) {
3637 LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
3638 LIR_Opr meth = new_register(T_METADATA);
3639 __ metadata2reg(method->constant_encoding(), meth);
3640 __ logical_and(result, mask, result);
3641 #ifndef MIPS
3642 __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3643 #endif
3644 // The bci for info can point to the cmp for ifs; we want the if bci
3645 CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3646 #ifndef MIPS
3647 __ branch(lir_cond_equal, T_INT, overflow);
3648 #else
3649 __ branch(lir_cond_equal, result, LIR_OprFact::intConst(0), T_INT, overflow);
3650 #endif
3651 __ branch_destination(overflow->continuation());
3652 }
3653 }
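// Lowers a RuntimeCall node: optionally passes the current thread as the
// first argument, loads the remaining arguments, and dispatches through
// the generic call_runtime() below.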
3655 void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3656 LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3657 BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3659 if (x->pass_thread()) {
3660 signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
3661 args->append(getThreadPointer());
3662 }
3664 for (int i = 0; i < x->number_of_arguments(); i++) {
3665 Value a = x->argument_at(i);
3666 LIRItem* item = new LIRItem(a, this);
3667 item->load_item();
3668 args->append(item->result());
3669 signature->append(as_BasicType(a->type()));
3670 }
3672 LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
3673 if (x->type() == voidType) {
3674 set_no_result(x);
3675 } else {
3676 __ move(result, rlock_result(x));
3677 }
3678 }
3680 #ifdef ASSERT
3681 void LIRGenerator::do_Assert(Assert *x) {
3682 ValueTag tag = x->x()->type()->tag();
3683 If::Condition cond = x->cond();
3685 LIRItem xitem(x->x(), this);
3686 LIRItem yitem(x->y(), this);
3687 LIRItem* xin = &xitem;
3688 LIRItem* yin = &yitem;
3690 assert(tag == intTag, "Only integer assertions are valid!");
3692 xin->load_item();
3693 yin->dont_load_item();
3695 set_no_result(x);
3697 LIR_Opr left = xin->result();
3698 LIR_Opr right = yin->result();
3700 __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
3701 }
3702 #endif
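// Three cases below: a missing operand (or stress mode) makes the
// predicate fail unconditionally; two constants are folded at compile
// time; otherwise a runtime compare-and-branch to the
// PredicateFailedStub is emitted.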
3704 void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
3707 Instruction *a = x->x();
3708 Instruction *b = x->y();
3709 if (!a || StressRangeCheckElimination) {
3710 assert(!b || StressRangeCheckElimination, "B must also be null");
3712 CodeEmitInfo *info = state_for(x, x->state());
3713 CodeStub* stub = new PredicateFailedStub(info);
3715 __ jump(stub);
3716 } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
3717 int a_int = a->type()->as_IntConstant()->value();
3718 int b_int = b->type()->as_IntConstant()->value();
3720 bool ok = false;
3722 switch(x->cond()) {
3723 case Instruction::eql: ok = (a_int == b_int); break;
3724 case Instruction::neq: ok = (a_int != b_int); break;
3725 case Instruction::lss: ok = (a_int < b_int); break;
3726 case Instruction::leq: ok = (a_int <= b_int); break;
3727 case Instruction::gtr: ok = (a_int > b_int); break;
3728 case Instruction::geq: ok = (a_int >= b_int); break;
3729 case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
3730 case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
3731 default: ShouldNotReachHere();
3732 }
3734 if (ok) {
3736 CodeEmitInfo *info = state_for(x, x->state());
3737 CodeStub* stub = new PredicateFailedStub(info);
3739 __ jump(stub);
3740 }
3741 } else {
3743 ValueTag tag = x->x()->type()->tag();
3744 If::Condition cond = x->cond();
3745 LIRItem xitem(x->x(), this);
3746 LIRItem yitem(x->y(), this);
3747 LIRItem* xin = &xitem;
3748 LIRItem* yin = &yitem;
3750 assert(tag == intTag, "Only integer deoptimizations are valid!");
3752 xin->load_item();
3753 yin->dont_load_item();
3754 set_no_result(x);
3756 LIR_Opr left = xin->result();
3757 LIR_Opr right = yin->result();
3759 CodeEmitInfo *info = state_for(x, x->state());
3760 CodeStub* stub = new PredicateFailedStub(info);
3762 #ifndef MIPS
3763 __ cmp(lir_cond(cond), left, right);
3764 __ branch(lir_cond(cond), right->type(), stub);
3765 #else
3766 tty->print_cr("LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) not implemented yet!");
3767 Unimplemented();
3768 #endif
3769 }
3770 }
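// Convenience overloads: wrap one or two Values as LIRItems, build the
// matching signature and forward to the general call_runtime() variants.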
3773 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
3774 LIRItemList args(1);
3775 LIRItem value(arg1, this);
3776 args.append(&value);
3777 BasicTypeList signature;
3778 signature.append(as_BasicType(arg1->type()));
3780 return call_runtime(&signature, &args, entry, result_type, info);
3781 }
3784 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
3785 LIRItemList args(2);
3786 LIRItem value1(arg1, this);
3787 LIRItem value2(arg2, this);
3788 args.append(&value1);
3789 args.append(&value2);
3790 BasicTypeList signature;
3791 signature.append(as_BasicType(arg1->type()));
3792 signature.append(as_BasicType(arg2->type()));
3794 return call_runtime(&signature, &args, entry, result_type, info);
3795 }
3798 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
3799 address entry, ValueType* result_type, CodeEmitInfo* info) {
3800 // get a result register
3801 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3802 LIR_Opr result = LIR_OprFact::illegalOpr;
3803 if (result_type->tag() != voidTag) {
3804 result = new_register(result_type);
3805 phys_reg = result_register_for(result_type);
3806 }
3808 // move the arguments into the correct location
3809 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3810 assert(cc->length() == args->length(), "argument mismatch");
3811 for (int i = 0; i < args->length(); i++) {
3812 LIR_Opr arg = args->at(i);
3813 LIR_Opr loc = cc->at(i);
3814 if (loc->is_register()) {
3815 __ move(arg, loc);
3816 } else {
3817 LIR_Address* addr = loc->as_address_ptr();
3818 // if (!can_store_as_constant(arg)) {
3819 // LIR_Opr tmp = new_register(arg->type());
3820 // __ move(arg, tmp);
3821 // arg = tmp;
3822 // }
3823 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3824 __ unaligned_move(arg, addr);
3825 } else {
3826 __ move(arg, addr);
3827 }
3828 }
3829 }
3831 if (info) {
3832 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3833 } else {
3834 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3835 }
3836 if (result->is_valid()) {
3837 __ move(phys_reg, result);
3838 }
3839 return result;
3840 }
3843 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
3844 address entry, ValueType* result_type, CodeEmitInfo* info) {
3845 // get a result register
3846 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3847 LIR_Opr result = LIR_OprFact::illegalOpr;
3848 if (result_type->tag() != voidTag) {
3849 result = new_register(result_type);
3850 phys_reg = result_register_for(result_type);
3851 }
3853 // move the arguments into the correct location
3854 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3856 assert(cc->length() == args->length(), "argument mismatch");
3857 for (int i = 0; i < args->length(); i++) {
3858 LIRItem* arg = args->at(i);
3859 LIR_Opr loc = cc->at(i);
3860 if (loc->is_register()) {
3861 arg->load_item_force(loc);
3862 } else {
3863 LIR_Address* addr = loc->as_address_ptr();
3864 arg->load_for_store(addr->type());
3865 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3866 __ unaligned_move(arg->result(), addr);
3867 } else {
3868 __ move(arg->result(), addr);
3869 }
3870 }
3871 }
3873 if (info) {
3874 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3875 } else {
3876 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3877 }
3878 if (result->is_valid()) {
3879 __ move(phys_reg, result);
3880 }
3881 return result;
3882 }
3884 void LIRGenerator::do_MemBar(MemBar* x) {
3885 if (os::is_MP()) {
3886 LIR_Code code = x->code();
3887 switch(code) {
3888 case lir_membar_acquire : __ membar_acquire(); break;
3889 case lir_membar_release : __ membar_release(); break;
3890 case lir_membar : __ membar(); break;
3891 case lir_membar_loadload : __ membar_loadload(); break;
3892 case lir_membar_storestore: __ membar_storestore(); break;
3893 case lir_membar_loadstore : __ membar_loadstore(); break;
3894 case lir_membar_storeload : __ membar_storeload(); break;
3895 default : ShouldNotReachHere(); break;
3896 }
3897 }
3898 }
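// Stores into boolean arrays must only ever write 0 or 1. Since boolean[]
// and byte[] elements are both stored as T_BYTE, the array's layout helper
// is inspected at runtime: when the boolean diffbit is set, the value is
// masked with 1 before the store.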
3900 LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
3901 if (x->check_boolean()) {
3902 LIR_Opr value_fixed = rlock_byte(T_BYTE);
3903 if (TwoOperandLIRForm) {
3904 __ move(value, value_fixed);
3905 __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
3906 } else {
3907 __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
3908 }
3909 LIR_Opr klass = new_register(T_METADATA);
3910 __ move(new LIR_Address(array, oopDesc::klass_offset_in_bytes(), T_ADDRESS), klass, null_check_info);
3911 null_check_info = NULL;
3912 LIR_Opr layout = new_register(T_INT);
3913 __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
3914 int diffbit = Klass::layout_helper_boolean_diffbit();
3915 __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
3916 #ifdef MIPS
3917 guarantee(false, "not implemented yet for mips");
3918 // __ cmp();
3919 // __ cmov();
3920 #else
3921 __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
3922 __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
3923 #endif
3924 value = value_fixed;
3925 }
3926 return value;
3927 }