/*
 * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */
#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
// TODO: ARM - Use some recognizable constant which still fits architectural constraints
#ifdef ARM
#define PATCHED_ADDR  (204)
#else
#define PATCHED_ADDR  (max_jint)
#endif
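
// Note: PATCHED_ADDR is a sentinel displacement for addresses that still
// need patching; the barrier code later in this file asserts that leal()
// never sees such a displacement ("lea doesn't support patched addresses!").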
void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}


//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
//
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3
PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}
// Traverse assignment graph in depth first order and generate moves in post order
// i.e. two assignments: b := c, a := b start with node c:
// Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
// Generates moves in this order: move b to a and move c to b
// i.e. cycle a := b, b := a start with node a
// Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
// Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}
PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}
ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}
//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
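      // keep the constant as the instruction's operand (set_result() asserts
      // that an operand never changes once set); only this LIRItem refers to
      // the register copy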
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}
ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}
//--------------------------------------------------------------


void LIRGenerator::init() {
  _bs = Universe::heap()->barrier_set();
#ifdef MIPS64
  assert(_bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
  // _card_table_base = new LIR_Const((intptr_t)ct->byte_map_base);
  // FIXME: untested in 32-bit. by aoqi
  _card_table_base = new LIR_Const(ct->byte_map_base);
#endif
}
void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}
void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}
void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}
//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be root;
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}
// This is called for each node in the tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when we encounter a root
  if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}
CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != NULL, "state must be defined");

#ifndef PRODUCT
  state->verify();
#endif

  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
}
CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}


void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info) {
  if (!obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ klass2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ metadata2reg(obj->constant_encoding(), r);
  }
}
void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
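  // The compares below use unsigned conditions (above/below), so in the
  // non-constant case a negative index also compares above the array length
  // and falls into the range-check stub.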
  if (index->is_constant()) {
#ifndef MIPS64
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
#else
    LIR_Opr left = LIR_OprFact::address(new LIR_Address(array, arrayOopDesc::length_offset_in_bytes(), T_INT));
    LIR_Opr right = LIR_OprFact::intConst(index->as_jint());
    __ null_check_for_branch(lir_cond_belowEqual, left, right, null_check_info);
    __ branch(lir_cond_belowEqual, left, right, T_INT, stub); // forward branch
#endif
  } else {
#ifndef MIPS64
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
#else
    LIR_Opr left = index;
    LIR_Opr right = LIR_OprFact::address(new LIR_Address(array, arrayOopDesc::length_offset_in_bytes(), T_INT));
    __ null_check_for_branch(lir_cond_aboveEqual, left, right, null_check_info);
    __ branch(lir_cond_aboveEqual, left, right, T_INT, stub); // forward branch
#endif
  }
}
void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
#ifndef MIPS64
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
#else
    LIR_Opr left = LIR_OprFact::address(new LIR_Address(buffer, java_nio_Buffer::limit_offset(), T_INT));
    LIR_Opr right = LIR_OprFact::intConst(index->as_jint());
    __ null_check_for_branch(lir_cond_belowEqual, left, right, info);
    __ branch(lir_cond_belowEqual, left, right, T_INT, stub); // forward branch
#endif
  } else {
#ifndef MIPS64
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
#else
    LIR_Opr left = index;
    LIR_Opr right = LIR_OprFact::address(new LIR_Address(buffer, java_nio_Buffer::limit_offset(), T_INT));
    __ null_check_for_branch(lir_cond_aboveEqual, left, right, info);
    __ branch(lir_cond_aboveEqual, left, right, T_INT, stub); // forward branch
#endif
  }
  __ move(index, result);
}
void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          int c = right->as_jint();
          if (is_power_of_2(c)) {
            // do not need tmp here
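            // e.g. a multiply by 8 is strength-reduced to a left shift by 3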
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        if (is_strictfp) {
          __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div (left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}
void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}
void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
  case Bytecodes::_ishl:
  case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
  case Bytecodes::_ishr:
  case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
  case Bytecodes::_iushr:
  case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
  default: ShouldNotReachHere();
  }
}
void LIRGenerator::logic_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}
void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}
#ifndef MIPS64
void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
#else
void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3,
                                LIR_Opr scratch4, LIR_Opr scratch5, LIR_Opr scratch6, LIR_Opr klass_reg, CodeEmitInfo* info) {
#endif
  klass2reg_with_patching(klass_reg, klass, info);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
#ifndef MIPS64
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
#else
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4, scratch5, scratch6,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
#endif
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
#ifndef MIPS64
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
#else
    __ branch(lir_cond_always, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
#endif
  }
}
static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

static ciType* phi_declared_type(Phi* phi) {
  ciType* t = phi->operand_at(0)->declared_type();
  if (t == NULL) {
    return NULL;
  }
  for(int i = 1; i < phi->operand_count(); i++) {
    if (t != phi->operand_at(i)->declared_type()) {
      return NULL;
    }
  }
  return t;
}
void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }

    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != NULL) {
    Value length_limit = NULL;

    IfOp* ifop = length->as_IfOp();
    if (ifop != NULL) {
      // look for expressions like min(v, a.length) which ends up as
      //   x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }

    // try to skip null checks and range checks
    NewArray* src_array = src->as_NewArray();
    if (src_array != NULL) {
      flags &= ~LIR_OpArrayCopy::src_null_check;
      if (length_limit != NULL &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }

    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != NULL) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != NULL &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    if (src_int->value() >= dst_int->value()) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != NULL) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
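      // The copy can use word-wide moves only if both the source and the
      // destination regions start on a HeapWord boundary. Illustration (an
      // assumed layout, not checked here): with a 16-byte T_INT array base
      // and 8-byte HeapWords, even element offsets satisfy the test below.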
      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    // src and dest positions are the same, or dst is zero so assume
    // nonoverlapping copy.
    flags &= ~LIR_OpArrayCopy::overlapping;
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}
LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}
LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()],
         err_msg_res("size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type())));
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}
#ifndef MIPS64
void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);

    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}
#else
void LIRGenerator::profile_branch(If* if_instr, If::Condition cond, LIR_Opr left, LIR_Opr right) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    if (md == NULL) {
      bailout("out of memory building methodDataOop");
      return;
    }
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }
    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);
    // __ move(LIR_OprFact::oopConst(md->constant_encoding()), md_reg);
    LIR_Opr data_offset_reg = new_pointer_register();

    LIR_Opr opr1 = LIR_OprFact::intConst(taken_count_offset);
    LIR_Opr opr2 = LIR_OprFact::intConst(not_taken_count_offset);
    LabelObj* skip = new LabelObj();

    __ move(opr1, data_offset_reg);
    __ branch(lir_cond(cond), left, right, skip->label());
    __ move(opr2, data_offset_reg);
    __ branch_destination(skip->label());

    LIR_Opr data_reg = new_pointer_register();
    LIR_Opr tmp_reg = new_pointer_register();
    // LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, T_INT);
    __ move(data_offset_reg, tmp_reg);
    __ add(tmp_reg, md_reg, tmp_reg);
    LIR_Address* data_addr = new LIR_Address(tmp_reg, 0, T_INT);
    __ move(LIR_OprFact::address(data_addr), data_reg);
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    // Use leal instead of add to avoid destroying condition codes on x86
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, LIR_OprFact::address(data_addr));
  }
}

#endif
// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated with Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double..) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At exit of a basic block, we fill the register(s) and spill slots.
// At entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.
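
// Illustrative sketch (hypothetical blocks, not actual code): with
//   B1: ... push v; goto B3      B2: ... push w; goto B3
//   B3: phi(v, w) on TOS
// the TOS phi value travels in a register, while any deeper stack values
// and locals are passed through their phi-indexed spill slots.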

// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    LIR_Opr operand = cur_val->operand();
    if (cur_val->operand()->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}
// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}
LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically. This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  return LIR_OprFact::virtual_register(vreg, type);
}
// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }

  set_result(x, reg);
  return reg;
}
//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}
#ifdef MIPS64
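// Card-marking sketch for the code below: the card table keeps one byte per
// 2^card_shift bytes of heap, so dirtying the card covering addr amounts to
//   byte_map_base[addr >> card_shift] = 0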
void LIRGenerator::write_barrier(LIR_Opr addr) {
  if (addr->is_address()) {
    LIR_Address* address = (LIR_Address*)addr;
    LIR_Opr ptr = new_register(T_OBJECT);
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  }
  if (can_inline_as_constant(card_table_base())) {
    __ move(LIR_OprFact::intConst(0), new LIR_Address(tmp, card_table_base()->as_jint(), T_BYTE));
  } else {
    __ add(tmp, load_constant(card_table_base()), tmp);
    __ move(LIR_OprFact::intConst(0), new LIR_Address(tmp, 0, T_BYTE));
  }
}
#endif

void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the begin of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}
// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block. After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}
void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}
void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    signature.append(T_METADATA); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}
// Example: ref.get()
// Combination of LoadField and g1 pre-write barrier
void LIRGenerator::do_Reference_get(Intrinsic* x) {

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Address* referent_field_adr =
    new LIR_Address(reference.result(), referent_offset, T_OBJECT);

  LIR_Opr result = rlock_result(x);

  __ load(referent_field_adr, result, info);

  // Register the value in the referent field with the pre-barrier
  pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
              result /* pre_val */,
              false  /* do_load */,
              false  /* patch */,
              NULL   /* info */);
}
// Example: clazz.isInstance(object)
void LIRGenerator::do_isInstance(Intrinsic* x) {
  assert(x->number_of_arguments() == 2, "wrong type");

  // TODO could try to substitute this node with an equivalent InstanceOf
  // if clazz is known to be a constant Class. This will pick up newly found
  // constants after HIR construction. I'll leave this to a future change.

  // as a first cut, make a simple leaf call to runtime to stay platform independent.
  // could follow the aastore example in a future change.

  LIRItem clazz(x->argument_at(0), this);
  LIRItem object(x->argument_at(1), this);
  clazz.load_item();
  object.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform null check on clazz
  if (x->needs_null_check()) {
    CodeEmitInfo* info = state_for(x);
    __ null_check(clazz.result(), info);
  }

  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
                                     x->type(),
                                     NULL); // NULL CodeEmitInfo results in a leaf call
  __ move(call_result, result);
}
// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr temp = new_register(T_METADATA);
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
  // meaning of these two is mixed up (see JDK-8026837).
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
  __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
}


// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}
void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}
//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}


Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}
void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}

// Block local constant handling. This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers. Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.
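// For example, a block that reads the same unpinned constant several times
// gets one virtual register for it from load_constant(); block_do_epilog()
// then clears the operand so later blocks reload the constant themselves.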

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}
// Various barriers

void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                               bool do_load, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      // No pre barriers
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No pre barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_post_barrier(addr, new_val);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr, new_val);
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No post barriers
      break;
    default:
      ShouldNotReachHere();
  }
}
////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                                                     bool do_load, bool patch, CodeEmitInfo* info) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    flag_type = T_BYTE;
  }
  LIR_Opr thrd = getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(JavaThread::satb_mark_queue_offset() +
                             PtrQueue::byte_offset_of_active()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
#ifndef MIPS64
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
#endif

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == NULL, "sanity");

    slow = new G1PreBarrierStub(pre_val);
  }

#ifndef MIPS64
  __ branch(lir_cond_notEqual, T_INT, slow);
#else
  __ branch(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0), T_INT, slow);
#endif
  __ branch_destination(slow->continuation());
}

void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  // If the "new_val" is a constant NULL, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == NULL) return;

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");
1679 LIR_Opr xor_res = new_pointer_register();
1680 LIR_Opr xor_shift_res = new_pointer_register();
1681 if (TwoOperandLIRForm) {
1682 __ move(addr, xor_res);
1683 __ logical_xor(xor_res, new_val, xor_res);
1684 __ move(xor_res, xor_shift_res);
1685 __ unsigned_shift_right(xor_shift_res,
1686 LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
1687 xor_shift_res,
1688 LIR_OprDesc::illegalOpr());
1689 } else {
1690 __ logical_xor(addr, new_val, xor_res);
1691 __ unsigned_shift_right(xor_res,
1692 LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
1693 xor_shift_res,
1694 LIR_OprDesc::illegalOpr());
1695 }
1697 if (!new_val->is_register()) {
1698 LIR_Opr new_val_reg = new_register(T_OBJECT);
1699 __ leal(new_val, new_val_reg);
1700 new_val = new_val_reg;
1701 }
1702 assert(new_val->is_register(), "must be a register at this point");
1704 #ifndef MIPS64
1705 __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
1707 #endif
1708 CodeStub* slow = new G1PostBarrierStub(addr, new_val);
1709 #ifndef MIPS64
1710 __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
1711 #else
1712 __ branch(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst((intptr_t)NULL_WORD), LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
1713 #endif
1714 __ branch_destination(slow->continuation());
1715 }
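// The xor/shift sequence above filters out same-region stores: only a store
// whose source and destination lie in different heap regions reaches the
// stub. As a sketch (not the literal emitted code):
//
//   if ((((uintptr_t)addr ^ (uintptr_t)new_val) >> LogOfHRGrainBytes) != 0) {
//     g1_post_barrier_slow(addr, new_val);    // G1PostBarrierStub: card mark + enqueue
//   }
//
// Stores of a constant NULL were already filtered out at the top of the method.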
1717 #endif // INCLUDE_ALL_GCS
1718 ////////////////////////////////////////////////////////////////////////
1720 void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1722 assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
1723 LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
1724 if (addr->is_address()) {
1725 LIR_Address* address = addr->as_address_ptr();
1726 // ptr cannot be an object because we use this barrier for array card marks
1727 // and addr can point in the middle of an array.
1728 LIR_Opr ptr = new_pointer_register();
1729 if (!address->index()->is_valid() && address->disp() == 0) {
1730 __ move(address->base(), ptr);
1731 } else {
1732 assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1733 __ leal(addr, ptr);
1734 }
1735 addr = ptr;
1736 }
1737 assert(addr->is_register(), "must be a register at this point");
1739 #ifdef ARM
1740 // TODO: ARM - move to platform-dependent code
1741 LIR_Opr tmp = FrameMap::R14_opr;
1742 if (VM_Version::supports_movw()) {
1743 __ move((LIR_Opr)card_table_base, tmp);
1744 } else {
1745 __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
1746 }
1748 CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
1749 LIR_Address *card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
1750 if (((int)ct->byte_map_base & 0xff) == 0) {
1751 __ move(tmp, card_addr);
1752 } else {
1753 LIR_Opr tmp_zero = new_register(T_INT);
1754 __ move(LIR_OprFact::intConst(0), tmp_zero);
1755 __ move(tmp_zero, card_addr);
1756 }
1757 #else // ARM
1758 LIR_Opr tmp = new_pointer_register();
1759 if (TwoOperandLIRForm) {
1760 __ move(addr, tmp);
1761 __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
1762 } else {
1763 __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
1764 }
1765 if (can_inline_as_constant(card_table_base)) {
1766 __ move(LIR_OprFact::intConst(0),
1767 new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
1768 } else {
1769 #ifndef MIPS64
1770 __ move(LIR_OprFact::intConst(0),
1771 new LIR_Address(tmp, load_constant(card_table_base),
1772 T_BYTE));
1773 #else
1774 __ add(tmp, load_constant(card_table_base), tmp);
1775 __ move(LIR_OprFact::intConst(0),
1776 new LIR_Address(tmp, 0,
1777 T_BYTE));
1778 #endif
1779 }
1780 #endif // ARM
1781 }
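// Net effect of the generic (non-ARM) path above, as a sketch: one byte
// store that dirties the card covering the updated location,
//
//   byte_map_base[(uintptr_t)addr >> card_shift] = 0;   // 0 == dirty card
//
// where byte_map_base is either folded into the address as an inline
// constant or first materialized via load_constant().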
1784 //------------------------field access--------------------------------------
1786 // Comment copied from templateTable_i486.cpp
1787 // ----------------------------------------------------------------------------
1788 // Volatile variables demand their effects be made known to all CPU's in
1789 // order. Store buffers on most chips allow reads & writes to reorder; the
1790 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1791 // memory barrier (i.e., it's not sufficient that the interpreter does not
1792 // reorder volatile references, the hardware also must not reorder them).
1793 //
1794 // According to the new Java Memory Model (JMM):
1795 // (1) All volatiles are serialized wrt to each other.
1796 // ALSO reads & writes act as acquire & release, so:
1797 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1798 // the read float up to before the read. It's OK for non-volatile memory refs
1799 // that happen before the volatile read to float down below it.
1800 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1801 // that happen BEFORE the write float down to after the write. It's OK for
1802 // non-volatile memory refs that happen after the volatile write to float up
1803 // before it.
1804 //
1805 // We only put in barriers around volatile refs (they are expensive), not
1806 // _between_ memory refs (that would require us to track the flavor of the
1807 // previous memory refs). Requirements (2) and (3) require some barriers
1808 // before volatile stores and after volatile loads. These nearly cover
1809 // requirement (1) but miss the volatile-store-volatile-load case. This final
1810 // case is placed after volatile-stores although it could just as well go
1811 // before volatile-loads.
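//
// Concretely, on MP systems do_StoreField() and do_LoadField() below
// bracket volatile accesses as follows (a summary, not additional code):
//
//   volatile store:  membar_release(); store(value, addr); membar();
//   volatile load:   load(addr, reg);  membar_acquire();
//
// The trailing full membar() after the store is what covers the
// volatile-store-volatile-load case mentioned above.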
1814 void LIRGenerator::do_StoreField(StoreField* x) {
1815 bool needs_patching = x->needs_patching();
1816 bool is_volatile = x->field()->is_volatile();
1817 BasicType field_type = x->field_type();
1818 bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
1820 CodeEmitInfo* info = NULL;
1821 if (needs_patching) {
1822 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1823 info = state_for(x, x->state_before());
1824 } else if (x->needs_null_check()) {
1825 NullCheck* nc = x->explicit_null_check();
1826 if (nc == NULL) {
1827 info = state_for(x);
1828 } else {
1829 info = state_for(nc);
1830 }
1831 }
1834 LIRItem object(x->obj(), this);
1835 LIRItem value(x->value(), this);
1837 object.load_item();
1839 if (is_volatile || needs_patching) {
1840 // load item if field is volatile (fewer special cases for volatiles)
1841 // load item if field not initialized
1842 // load item if field not constant
1843 // because of code patching we cannot inline constants
1844 if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1845 value.load_byte_item();
1846 } else {
1847 value.load_item();
1848 }
1849 } else {
1850 value.load_for_store(field_type);
1851 }
1853 set_no_result(x);
1855 #ifndef PRODUCT
1856 if (PrintNotLoaded && needs_patching) {
1857 tty->print_cr(" ###class not loaded at store_%s bci %d",
1858 x->is_static() ? "static" : "field", x->printable_bci());
1859 }
1860 #endif
1862 if (x->needs_null_check() &&
1863 (needs_patching ||
1864 MacroAssembler::needs_explicit_null_check(x->offset()))) {
1865 // emit an explicit null check because the offset is too large
1866 __ null_check(object.result(), new CodeEmitInfo(info));
1867 }
1869 LIR_Address* address;
1870 if (needs_patching) {
1871 // we need to patch the offset in the instruction so don't allow
1872 // generate_address to try to be smart about emitting the -1.
1873 // Otherwise the patching code won't know how to find the
1874 // instruction to patch.
1875 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1876 } else {
1877 address = generate_address(object.result(), x->offset(), field_type);
1878 }
1880 if (is_volatile && os::is_MP()) {
1881 __ membar_release();
1882 }
1884 if (is_oop) {
1885 // Do the pre-write barrier, if any.
1886 pre_barrier(LIR_OprFact::address(address),
1887 LIR_OprFact::illegalOpr /* pre_val */,
1888 true /* do_load*/,
1889 needs_patching,
1890 (info ? new CodeEmitInfo(info) : NULL));
1891 }
1893 if (is_volatile && !needs_patching) {
1894 volatile_field_store(value.result(), address, info);
1895 } else {
1896 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1897 __ store(value.result(), address, info, patch_code);
1898 }
1900 if (is_oop) {
1901 // Store of an oop into the object, so mark the card of the object's header
1902 post_barrier(object.result(), value.result());
1903 }
1905 if (is_volatile && os::is_MP()) {
1906 __ membar();
1907 }
1908 }
1911 void LIRGenerator::do_LoadField(LoadField* x) {
1912 bool needs_patching = x->needs_patching();
1913 bool is_volatile = x->field()->is_volatile();
1914 BasicType field_type = x->field_type();
1916 CodeEmitInfo* info = NULL;
1917 if (needs_patching) {
1918 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1919 info = state_for(x, x->state_before());
1920 } else if (x->needs_null_check()) {
1921 NullCheck* nc = x->explicit_null_check();
1922 if (nc == NULL) {
1923 info = state_for(x);
1924 } else {
1925 info = state_for(nc);
1926 }
1927 }
1929 LIRItem object(x->obj(), this);
1931 object.load_item();
1933 #ifndef PRODUCT
1934 if (PrintNotLoaded && needs_patching) {
1935 tty->print_cr(" ###class not loaded at load_%s bci %d",
1936 x->is_static() ? "static" : "field", x->printable_bci());
1937 }
1938 #endif
1940 bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1941 if (x->needs_null_check() &&
1942 (needs_patching ||
1943 MacroAssembler::needs_explicit_null_check(x->offset()) ||
1944 stress_deopt)) {
1945 LIR_Opr obj = object.result();
1946 if (stress_deopt) {
1947 obj = new_register(T_OBJECT);
1948 __ move(LIR_OprFact::oopConst(NULL), obj);
1949 }
1950 // emit an explicit null check because the offset is too large
1951 __ null_check(obj, new CodeEmitInfo(info));
1952 }
1954 LIR_Opr reg = rlock_result(x, field_type);
1955 LIR_Address* address;
1956 if (needs_patching) {
1957 // we need to patch the offset in the instruction so don't allow
1958 // generate_address to try to be smart about emitting the -1.
1959 // Otherwise the patching code won't know how to find the
1960 // instruction to patch.
1961 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1962 } else {
1963 address = generate_address(object.result(), x->offset(), field_type);
1964 }
1966 if (is_volatile && !needs_patching) {
1967 volatile_field_load(address, reg, info);
1968 } else {
1969 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1970 __ load(address, reg, info, patch_code);
1971 }
1973 if (is_volatile && os::is_MP()) {
1974 __ membar_acquire();
1975 }
1976 }
1979 //------------------------java.nio.Buffer.checkIndex------------------------
1981 // int java.nio.Buffer.checkIndex(int)
1982 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1983 // NOTE: by the time we are in checkIndex() we are guaranteed that
1984 // the buffer is non-null (because checkIndex is package-private and
1985 // only called from within other methods in the buffer).
1986 assert(x->number_of_arguments() == 2, "wrong type");
1987 LIRItem buf (x->argument_at(0), this);
1988 LIRItem index(x->argument_at(1), this);
1989 buf.load_item();
1990 index.load_item();
1992 LIR_Opr result = rlock_result(x);
1993 if (GenerateRangeChecks) {
1994 CodeEmitInfo* info = state_for(x);
1995 CodeStub* stub = new RangeCheckStub(info, index.result(), true);
1996 if (index.result()->is_constant()) {
1997 #ifndef MIPS64
1998 cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1999 __ branch(lir_cond_belowEqual, T_INT, stub);
2000 #else
2001 LIR_Opr left = LIR_OprFact::address(new LIR_Address(buf.result(),
2002 java_nio_Buffer::limit_offset(), T_INT));
2003 LIR_Opr right = LIR_OprFact::intConst(index.result()->as_jint());
2004 __ null_check_for_branch(lir_cond_belowEqual, left, right, info);
2005 __ branch(lir_cond_belowEqual, left, right, T_INT, stub); // forward branch
2007 #endif
2008 } else {
2009 #ifndef MIPS64
2010 cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
2011 java_nio_Buffer::limit_offset(), T_INT, info);
2012 __ branch(lir_cond_aboveEqual, T_INT, stub);
2013 #else
2014 LIR_Opr right = LIR_OprFact::address(new LIR_Address(buf.result(), java_nio_Buffer::limit_offset(), T_INT));
2015 LIR_Opr left = index.result();
2016 __ null_check_for_branch(lir_cond_aboveEqual, left, right, info);
2017 __ branch(lir_cond_aboveEqual, left, right, T_INT, stub); // forward branch
2018 #endif
2020 }
2021 __ move(index.result(), result);
2022 } else {
2023 // Just load the index into the result register
2024 __ move(index.result(), result);
2025 }
2026 }
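// Both operand orders used above (belowEqual on (limit, index) in the
// constant case, aboveEqual on (index, limit) otherwise) encode the same
// test. In Java terms, a sketch of what is generated:
//
//   if (index >= buf.limit) throw_via_RangeCheckStub();
//   result = index;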
2029 //------------------------array access--------------------------------------
2032 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
2033 LIRItem array(x->array(), this);
2034 array.load_item();
2035 LIR_Opr reg = rlock_result(x);
2037 CodeEmitInfo* info = NULL;
2038 if (x->needs_null_check()) {
2039 NullCheck* nc = x->explicit_null_check();
2040 if (nc == NULL) {
2041 info = state_for(x);
2042 } else {
2043 info = state_for(nc);
2044 }
2045 if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
2046 LIR_Opr obj = new_register(T_OBJECT);
2047 __ move(LIR_OprFact::oopConst(NULL), obj);
2048 __ null_check(obj, new CodeEmitInfo(info));
2049 }
2050 }
2051 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
2052 }
2055 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
2056 bool use_length = x->length() != NULL;
2057 LIRItem array(x->array(), this);
2058 LIRItem index(x->index(), this);
2059 LIRItem length(this);
2060 bool needs_range_check = x->compute_needs_range_check();
2062 if (use_length && needs_range_check) {
2063 length.set_instruction(x->length());
2064 length.load_item();
2065 }
2067 array.load_item();
2068 if (index.is_constant() && can_inline_as_constant(x->index())) {
2069 // let it be a constant
2070 index.dont_load_item();
2071 } else {
2072 index.load_item();
2073 }
2075 CodeEmitInfo* range_check_info = state_for(x);
2076 CodeEmitInfo* null_check_info = NULL;
2077 if (x->needs_null_check()) {
2078 NullCheck* nc = x->explicit_null_check();
2079 if (nc != NULL) {
2080 null_check_info = state_for(nc);
2081 } else {
2082 null_check_info = range_check_info;
2083 }
2084 if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
2085 LIR_Opr obj = new_register(T_OBJECT);
2086 __ move(LIR_OprFact::oopConst(NULL), obj);
2087 __ null_check(obj, new CodeEmitInfo(null_check_info));
2088 }
2089 }
2091 // emit array address setup early so it schedules better
2092 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
2094 if (GenerateRangeChecks && needs_range_check) {
2095 if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
2096 #ifndef MIPS64
2097 __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
2098 #else
2099 tty->print_cr("LIRGenerator::do_LoadIndexed(LoadIndexed* x) not implemented yet!");
2100 Unimplemented();
2101 #endif
2102 } else if (use_length) {
2103 // TODO: use a (modified) version of array_range_check that does not require a
2104 // constant length to be loaded to a register
2105 #ifndef MIPS64
2106 __ cmp(lir_cond_belowEqual, length.result(), index.result());
2107 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
2108 #else
2109 __ branch(lir_cond_belowEqual, length.result(), index.result(), T_INT, new RangeCheckStub(range_check_info, index.result()));
2110 #endif
2111 } else {
2112 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
2113 // The range check performs the null check, so clear it out for the load
2114 null_check_info = NULL;
2115 }
2116 }
2118 __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
2119 }
2122 void LIRGenerator::do_NullCheck(NullCheck* x) {
2123 if (x->can_trap()) {
2124 LIRItem value(x->obj(), this);
2125 value.load_item();
2126 CodeEmitInfo* info = state_for(x);
2127 __ null_check(value.result(), info);
2128 }
2129 }
2132 void LIRGenerator::do_TypeCast(TypeCast* x) {
2133 LIRItem value(x->obj(), this);
2134 value.load_item();
2135 // the result is the same as from the node we are casting
2136 set_result(x, value.result());
2137 }
2140 void LIRGenerator::do_Throw(Throw* x) {
2141 LIRItem exception(x->exception(), this);
2142 exception.load_item();
2143 set_no_result(x);
2144 LIR_Opr exception_opr = exception.result();
2145 CodeEmitInfo* info = state_for(x, x->state());
2147 #ifndef PRODUCT
2148 if (PrintC1Statistics) {
2149 increment_counter(Runtime1::throw_count_address(), T_INT);
2150 }
2151 #endif
2153 // check if the instruction has an xhandler in any of the nested scopes
2154 bool unwind = false;
2155 if (info->exception_handlers()->length() == 0) {
2156 // this throw is not inside an xhandler
2157 unwind = true;
2158 } else {
2159 // get some idea of the throw type
2160 bool type_is_exact = true;
2161 ciType* throw_type = x->exception()->exact_type();
2162 if (throw_type == NULL) {
2163 type_is_exact = false;
2164 throw_type = x->exception()->declared_type();
2165 }
2166 if (throw_type != NULL && throw_type->is_instance_klass()) {
2167 ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
2168 unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
2169 }
2170 }
2172 // do null check before moving exception oop into fixed register
2173 // to avoid a fixed interval with an oop during the null check.
2174 // Use a copy of the CodeEmitInfo because debug information is
2175 // different for null_check and throw.
2176 if (GenerateCompilerNullChecks &&
2177 (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
2178 // if the exception object wasn't created using new then it might be null.
2179 __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
2180 }
2182 if (compilation()->env()->jvmti_can_post_on_exceptions()) {
2183 // we need to go through the exception lookup path to get JVMTI
2184 // notification done
2185 unwind = false;
2186 }
2188 // move exception oop into fixed register
2189 __ move(exception_opr, exceptionOopOpr());
2191 if (unwind) {
2192 __ unwind_exception(exceptionOopOpr());
2193 } else {
2194 __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
2195 }
2196 }
2199 void LIRGenerator::do_RoundFP(RoundFP* x) {
2200 LIRItem input(x->input(), this);
2201 input.load_item();
2202 LIR_Opr input_opr = input.result();
2203 assert(input_opr->is_register(), "why round if value is not in a register?");
2204 assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
2205 if (input_opr->is_single_fpu()) {
2206 set_result(x, round_item(input_opr)); // This code path not currently taken
2207 } else {
2208 LIR_Opr result = new_register(T_DOUBLE);
2209 set_vreg_flag(result, must_start_in_memory);
2210 __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
2211 set_result(x, result);
2212 }
2213 }
2215 void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
2216 LIRItem base(x->base(), this);
2217 LIRItem idx(this);
2219 base.load_item();
2220 if (x->has_index()) {
2221 idx.set_instruction(x->index());
2222 idx.load_nonconstant();
2223 }
2225 LIR_Opr reg = rlock_result(x, x->basic_type());
2227 int log2_scale = 0;
2228 if (x->has_index()) {
2229 assert(x->index()->type()->tag() == intTag, "should not find non-int index");
2230 log2_scale = x->log2_scale();
2231 }
2233 assert(!x->has_index() || idx.value() == x->index(), "should match");
2235 LIR_Opr base_op = base.result();
2236 #ifndef _LP64
2237 if (x->base()->type()->tag() == longTag) {
2238 base_op = new_register(T_INT);
2239 __ convert(Bytecodes::_l2i, base.result(), base_op);
2240 } else {
2241 assert(x->base()->type()->tag() == intTag, "must be");
2242 }
2243 #endif
2245 BasicType dst_type = x->basic_type();
2246 LIR_Opr index_op = idx.result();
2248 LIR_Address* addr;
2249 if (index_op->is_constant()) {
2250 assert(log2_scale == 0, "must not have a scale");
2251 addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2252 } else {
2253 #ifdef X86
2254 #ifdef _LP64
2255 if (!index_op->is_illegal() && index_op->type() == T_INT) {
2256 LIR_Opr tmp = new_pointer_register();
2257 __ convert(Bytecodes::_i2l, index_op, tmp);
2258 index_op = tmp;
2259 }
2260 #endif
2261 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2262 #elif defined(ARM)
2263 addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2264 #else
2265 if (index_op->is_illegal() || log2_scale == 0) {
2266 #ifdef _LP64
2267 if (!index_op->is_illegal() && index_op->type() == T_INT) {
2268 LIR_Opr tmp = new_pointer_register();
2269 __ convert(Bytecodes::_i2l, index_op, tmp);
2270 index_op = tmp;
2271 }
2272 #endif
2273 #ifndef MIPS64
2274 addr = new LIR_Address(base_op, index_op, dst_type);
2275 #else
2276 #ifdef _LP64
2277 LIR_Opr ptr = new_register(T_LONG);
2278 #else
2279 LIR_Opr ptr = new_register(T_INT);
2280 #endif
2281 __ move(base_op, ptr);
2282 if (index_op->is_valid())
2283 __ add(ptr, index_op, ptr);
2284 addr = new LIR_Address(ptr, 0, dst_type);
2285 #endif
2286 } else {
2287 LIR_Opr tmp = new_pointer_register();
2288 __ shift_left(index_op, log2_scale, tmp);
2289 addr = new LIR_Address(base_op, tmp, dst_type);
2290 }
2291 #endif
2292 }
2294 if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2295 __ unaligned_move(addr, reg);
2296 } else {
2297 if (dst_type == T_OBJECT && x->is_wide()) {
2298 __ move_wide(addr, reg);
2299 } else {
2300 __ move(addr, reg);
2301 }
2302 }
2303 }
2306 void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
2307 int log2_scale = 0;
2308 BasicType type = x->basic_type();
2310 if (x->has_index()) {
2311 assert(x->index()->type()->tag() == intTag, "should not find non-int index");
2312 log2_scale = x->log2_scale();
2313 }
2315 LIRItem base(x->base(), this);
2316 LIRItem value(x->value(), this);
2317 LIRItem idx(this);
2319 base.load_item();
2320 if (x->has_index()) {
2321 idx.set_instruction(x->index());
2322 idx.load_item();
2323 }
2325 if (type == T_BYTE || type == T_BOOLEAN) {
2326 value.load_byte_item();
2327 } else {
2328 value.load_item();
2329 }
2331 set_no_result(x);
2333 LIR_Opr base_op = base.result();
2334 #ifndef _LP64
2335 if (x->base()->type()->tag() == longTag) {
2336 base_op = new_register(T_INT);
2337 __ convert(Bytecodes::_l2i, base.result(), base_op);
2338 } else {
2339 assert(x->base()->type()->tag() == intTag, "must be");
2340 }
2341 #endif
2343 LIR_Opr index_op = idx.result();
2344 if (log2_scale != 0) {
2345 // temporary fix (platform dependent code without shift on Intel would be better)
2346 index_op = new_pointer_register();
2347 #ifdef _LP64
2348 if (idx.result()->type() == T_INT) {
2349 __ convert(Bytecodes::_i2l, idx.result(), index_op);
2350 } else {
2351 #endif
2352 // TODO: ARM also allows embedded shift in the address
2353 __ move(idx.result(), index_op);
2354 #ifdef _LP64
2355 }
2356 #endif
2357 __ shift_left(index_op, log2_scale, index_op);
2358 }
2359 #ifdef _LP64
2360 else if (!index_op->is_illegal() && index_op->type() == T_INT) {
2361 LIR_Opr tmp = new_pointer_register();
2362 __ convert(Bytecodes::_i2l, index_op, tmp);
2363 index_op = tmp;
2364 }
2365 #endif
2367 LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2368 __ move(value.result(), addr);
2369 }
2372 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2373 BasicType type = x->basic_type();
2374 LIRItem src(x->object(), this);
2375 LIRItem off(x->offset(), this);
2377 off.load_item();
2378 src.load_item();
2380 LIR_Opr value = rlock_result(x, x->basic_type());
2382 get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2384 #if INCLUDE_ALL_GCS
2385 // We might be reading the value of the referent field of a
2386 // Reference object in order to attach it back to the live
2387 // object graph. If G1 is enabled then we need to record
2388 // the value that is being returned in an SATB log buffer.
2389 //
2390 // We need to generate code similar to the following...
2391 //
2392 // if (offset == java_lang_ref_Reference::referent_offset) {
2393 // if (src != NULL) {
2394 // if (klass(src)->reference_type() != REF_NONE) {
2395 // pre_barrier(..., value, ...);
2396 // }
2397 // }
2398 // }
2400 if (UseG1GC && type == T_OBJECT) {
2401 bool gen_pre_barrier = true; // Assume we need to generate pre_barrier.
2402 bool gen_offset_check = true; // Assume we need to generate the offset guard.
2403 bool gen_source_check = true; // Assume we need to check the src object for null.
2404 bool gen_type_check = true; // Assume we need to check the reference_type.
2406 if (off.is_constant()) {
2407 jlong off_con = (off.type()->is_int() ?
2408 (jlong) off.get_jint_constant() :
2409 off.get_jlong_constant());
2412 if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2413 // The constant offset is something other than referent_offset.
2414 // We can skip generating/checking the remaining guards and
2415 // skip generation of the code stub.
2416 gen_pre_barrier = false;
2417 } else {
2418 // The constant offset is the same as referent_offset -
2419 // we do not need to generate a runtime offset check.
2420 gen_offset_check = false;
2421 }
2422 }
2424 // We don't need to generate stub if the source object is an array
2425 if (gen_pre_barrier && src.type()->is_array()) {
2426 gen_pre_barrier = false;
2427 }
2429 if (gen_pre_barrier) {
2430 // We still need to continue with the checks.
2431 if (src.is_constant()) {
2432 ciObject* src_con = src.get_jobject_constant();
2433 guarantee(src_con != NULL, "no source constant");
2435 if (src_con->is_null_object()) {
2436 // The constant src object is null - We can skip
2437 // generating the code stub.
2438 gen_pre_barrier = false;
2439 } else {
2440 // Non-null constant source object. We still have to generate
2441 // the slow stub - but we don't need to generate the runtime
2442 // null object check.
2443 gen_source_check = false;
2444 }
2445 }
2446 }
2447 if (gen_pre_barrier && !PatchALot) {
2448 // Can the klass of object be statically determined to be
2449 // a sub-class of Reference?
2450 ciType* type = src.value()->declared_type();
2451 if ((type != NULL) && type->is_loaded()) {
2452 if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
2453 gen_type_check = false;
2454 } else if (type->is_klass() &&
2455 !compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
2456 // Not Reference and not Object klass.
2457 gen_pre_barrier = false;
2458 }
2459 }
2460 }
2462 if (gen_pre_barrier) {
2463 LabelObj* Lcont = new LabelObj();
2465 // We may have to generate up to three runtime checks here. Let's start
2466 // with the offset check.
2467 if (gen_offset_check) {
2468 // if (offset != referent_offset) -> continue
2469 // If offset is an int then we can do the comparison with the
2470 // referent_offset constant; otherwise we need to move
2471 // referent_offset into a temporary register and generate
2472 // a reg-reg compare.
2474 LIR_Opr referent_off;
2476 if (off.type()->is_int()) {
2477 referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
2478 } else {
2479 assert(off.type()->is_long(), "what else?");
2480 referent_off = new_register(T_LONG);
2481 __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
2482 }
2483 #ifndef MIPS64
2484 __ cmp(lir_cond_notEqual, off.result(), referent_off);
2485 __ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
2486 #else
2487 __ branch(lir_cond_notEqual, off.result(), referent_off, Lcont->label());
2488 #endif
2489 }
2490 if (gen_source_check) {
2491 // offset is a const and equals referent offset
2492 // if (source == null) -> continue
2493 #ifndef MIPS64
2494 __ cmp(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL));
2495 __ branch(lir_cond_equal, T_OBJECT, Lcont->label());
2496 #else
2497 __ branch(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL), Lcont->label());
2498 #endif
2499 }
2500 LIR_Opr src_klass = new_register(T_OBJECT);
2501 if (gen_type_check) {
2502 // We have determined that offset == referent_offset && src != null.
2503 // if (src->_klass->_reference_type == REF_NONE) -> continue
2504 __ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
2505 LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
2506 LIR_Opr reference_type = new_register(T_INT);
2507 __ move(reference_type_addr, reference_type);
2508 #ifndef MIPS64
2509 __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
2510 __ branch(lir_cond_equal, T_INT, Lcont->label());
2511 #else
2512 __ branch(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE), Lcont->label());
2513 #endif
2514 }
2515 {
2516 // We have determined that src->_klass->_reference_type != REF_NONE
2517 // so register the value in the referent field with the pre-barrier.
2518 pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
2519 value /* pre_val */,
2520 false /* do_load */,
2521 false /* patch */,
2522 NULL /* info */);
2523 }
2524 __ branch_destination(Lcont->label());
2525 }
2526 }
2527 #endif // INCLUDE_ALL_GCS
2529 if (x->is_volatile() && os::is_MP()) __ membar_acquire();
2530 }
2533 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2534 BasicType type = x->basic_type();
2535 LIRItem src(x->object(), this);
2536 LIRItem off(x->offset(), this);
2537 LIRItem data(x->value(), this);
2539 src.load_item();
2540 if (type == T_BOOLEAN || type == T_BYTE) {
2541 data.load_byte_item();
2542 } else {
2543 data.load_item();
2544 }
2545 off.load_item();
2547 set_no_result(x);
2549 if (x->is_volatile() && os::is_MP()) __ membar_release();
2550 put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
2551 if (x->is_volatile() && os::is_MP()) __ membar();
2552 }
2555 void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
2556 LIRItem src(x->object(), this);
2557 LIRItem off(x->offset(), this);
2559 src.load_item();
2560 if (off.is_constant() && can_inline_as_constant(x->offset())) {
2561 // let it be a constant
2562 off.dont_load_item();
2563 } else {
2564 off.load_item();
2565 }
2567 set_no_result(x);
2569 LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
2570 __ prefetch(addr, is_store);
2571 }
2574 void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
2575 do_UnsafePrefetch(x, false);
2576 }
2579 void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
2580 do_UnsafePrefetch(x, true);
2581 }
2584 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2585 int lng = x->length();
2587 for (int i = 0; i < lng; i++) {
2588 SwitchRange* one_range = x->at(i);
2589 int low_key = one_range->low_key();
2590 int high_key = one_range->high_key();
2591 BlockBegin* dest = one_range->sux();
2592 if (low_key == high_key) {
2593 #ifndef MIPS64
2594 __ cmp(lir_cond_equal, value, low_key);
2595 __ branch(lir_cond_equal, T_INT, dest);
2596 #else
2597 __ branch(lir_cond_equal, value, LIR_OprFact::intConst(low_key), T_INT, dest);
2598 #endif
2599 } else if (high_key - low_key == 1) {
2600 #ifndef MIPS64
2601 __ cmp(lir_cond_equal, value, low_key);
2602 __ branch(lir_cond_equal, T_INT, dest);
2603 __ cmp(lir_cond_equal, value, high_key);
2604 __ branch(lir_cond_equal, T_INT, dest);
2605 #else
2606 __ branch(lir_cond_equal, value, LIR_OprFact::intConst(low_key), T_INT, dest);
2607 __ branch(lir_cond_equal, value, LIR_OprFact::intConst(high_key), T_INT, dest);
2609 #endif
2610 } else {
2611 LabelObj* L = new LabelObj();
2612 #ifndef MIPS64
2613 __ cmp(lir_cond_less, value, low_key);
2614 __ branch(lir_cond_less, T_INT, L->label());
2615 __ cmp(lir_cond_lessEqual, value, high_key);
2616 __ branch(lir_cond_lessEqual, T_INT, dest);
2617 __ branch_destination(L->label());
2618 #else
2619 __ branch(lir_cond_less, value, LIR_OprFact::intConst(low_key), L->label());
2620 __ branch(lir_cond_lessEqual, value, LIR_OprFact::intConst(high_key), T_INT, dest);
2621 __ branch_destination(L->label());
2622 #endif
2623 }
2624 }
2625 __ jump(default_sux);
2626 }
2629 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2630 SwitchRangeList* res = new SwitchRangeList();
2631 int len = x->length();
2632 if (len > 0) {
2633 BlockBegin* sux = x->sux_at(0);
2634 int key = x->lo_key();
2635 BlockBegin* default_sux = x->default_sux();
2636 SwitchRange* range = new SwitchRange(key, sux);
2637 for (int i = 0; i < len; i++, key++) {
2638 BlockBegin* new_sux = x->sux_at(i);
2639 if (sux == new_sux) {
2640 // still in same range
2641 range->set_high_key(key);
2642 } else {
2643 // skip tests which explicitly dispatch to the default
2644 if (sux != default_sux) {
2645 res->append(range);
2646 }
2647 range = new SwitchRange(key, new_sux);
2648 }
2649 sux = new_sux;
2650 }
2651 if (res->length() == 0 || res->last() != range) res->append(range);
2652 }
2653 return res;
2654 }
2657 // we expect the keys to be sorted by increasing value
2658 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2659 SwitchRangeList* res = new SwitchRangeList();
2660 int len = x->length();
2661 if (len > 0) {
2662 BlockBegin* default_sux = x->default_sux();
2663 int key = x->key_at(0);
2664 BlockBegin* sux = x->sux_at(0);
2665 SwitchRange* range = new SwitchRange(key, sux);
2666 for (int i = 1; i < len; i++) {
2667 int new_key = x->key_at(i);
2668 BlockBegin* new_sux = x->sux_at(i);
2669 if (key+1 == new_key && sux == new_sux) {
2670 // still in same range
2671 range->set_high_key(new_key);
2672 } else {
2673 // skip tests which explicitly dispatch to the default
2674 if (range->sux() != default_sux) {
2675 res->append(range);
2676 }
2677 range = new SwitchRange(new_key, new_sux);
2678 }
2679 key = new_key;
2680 sux = new_sux;
2681 }
2682 if (res->length() == 0 || res->last() != range) res->append(range);
2683 }
2684 return res;
2685 }
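// Worked example with hypothetical keys: a lookupswitch mapping
//   1 -> B1, 2 -> B1, 3 -> B2, 7 -> B1
// yields the ranges [1,2] -> B1, [3,3] -> B2, [7,7] -> B1 (assuming neither
// B1 nor B2 is the default target). Keys merge only when they are
// consecutive *and* share a successor; intermediate ranges that dispatch
// straight to the default target are skipped.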
2688 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2689 LIRItem tag(x->tag(), this);
2690 tag.load_item();
2691 set_no_result(x);
2693 if (x->is_safepoint()) {
2694 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2695 }
2697 // move values into phi locations
2698 move_to_phi(x->state());
2700 int lo_key = x->lo_key();
2701 int hi_key = x->hi_key();
2702 int len = x->length();
2703 LIR_Opr value = tag.result();
2704 if (UseTableRanges) {
2705 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2706 } else {
2707 for (int i = 0; i < len; i++) {
2708 #ifndef MIPS64
2709 __ cmp(lir_cond_equal, value, i + lo_key);
2710 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2711 #else
2712 __ branch(lir_cond_equal, value, LIR_OprFact::intConst(i + lo_key), T_INT, x->sux_at(i));
2713 #endif
2714 }
2715 __ jump(x->default_sux());
2716 }
2717 }
2720 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2721 LIRItem tag(x->tag(), this);
2722 tag.load_item();
2723 set_no_result(x);
2725 if (x->is_safepoint()) {
2726 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2727 }
2729 // move values into phi locations
2730 move_to_phi(x->state());
2732 LIR_Opr value = tag.result();
2733 if (UseTableRanges) {
2734 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2735 } else {
2736 int len = x->length();
2737 for (int i = 0; i < len; i++) {
2738 #ifndef MIPS64
2739 __ cmp(lir_cond_equal, value, x->key_at(i));
2740 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2741 #else
2742 __ branch(lir_cond_equal, value, LIR_OprFact::intConst(x->key_at(i)), T_INT, x->sux_at(i));
2743 #endif
2744 }
2745 __ jump(x->default_sux());
2746 }
2747 }
2750 void LIRGenerator::do_Goto(Goto* x) {
2751 set_no_result(x);
2753 if (block()->next()->as_OsrEntry()) {
2754 // need to free up storage used for OSR entry point
2755 LIR_Opr osrBuffer = block()->next()->operand();
2756 BasicTypeList signature;
2757 signature.append(T_INT);
2758 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2759 __ move(osrBuffer, cc->args()->at(0));
2760 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2761 getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2762 }
2764 if (x->is_safepoint()) {
2765 ValueStack* state = x->state_before() ? x->state_before() : x->state();
2767 // increment backedge counter if needed
2768 CodeEmitInfo* info = state_for(x, state);
2769 increment_backedge_counter(info, x->profiled_bci());
2770 CodeEmitInfo* safepoint_info = state_for(x, state);
2771 __ safepoint(safepoint_poll_register(), safepoint_info);
2772 }
2775 // Gotos can be folded Ifs; handle this case.
2775 if (x->should_profile()) {
2776 ciMethod* method = x->profiled_method();
2777 assert(method != NULL, "method should be set if branch is profiled");
2778 ciMethodData* md = method->method_data_or_null();
2779 assert(md != NULL, "Sanity");
2780 ciProfileData* data = md->bci_to_data(x->profiled_bci());
2781 assert(data != NULL, "must have profiling data");
2782 int offset;
2783 if (x->direction() == Goto::taken) {
2784 assert(data->is_BranchData(), "need BranchData for two-way branches");
2785 offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2786 } else if (x->direction() == Goto::not_taken) {
2787 assert(data->is_BranchData(), "need BranchData for two-way branches");
2788 offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2789 } else {
2790 assert(data->is_JumpData(), "need JumpData for branches");
2791 offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2792 }
2793 LIR_Opr md_reg = new_register(T_METADATA);
2794 __ metadata2reg(md->constant_encoding(), md_reg);
2796 increment_counter(new LIR_Address(md_reg, offset,
2797 NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2798 }
2800 // emit phi-instruction moves after the safepoint since this simplifies
2801 // describing the state at the safepoint.
2802 move_to_phi(x->state());
2804 __ jump(x->default_sux());
2805 }
2807 /**
2808 * Emit profiling code if needed for arguments, parameters, return value types
2809 *
2810 * @param md MDO the code will update at runtime
2811 * @param md_base_offset common offset in the MDO for this profile and subsequent ones
2812 * @param md_offset offset in the MDO (on top of md_base_offset) for this profile
2813 * @param profiled_k current profile
2814 * @param obj IR node for the object to be profiled
2815 * @param mdp register to hold the pointer inside the MDO (md + md_base_offset).
2816 * Set once we find an update to make and use for next ones.
2817 * @param not_null true if we know obj cannot be null
2818 * @param signature_at_call_k signature at call for obj
2819 * @param callee_signature_k signature of callee for obj
2820 * (the at-call and callee signatures differ at method handle call sites)
2821 * @return the only klass we know will ever be seen at this profile point
2822 */
2823 ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
2824 Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
2825 ciKlass* callee_signature_k) {
2826 ciKlass* result = NULL;
2827 bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
2828 bool do_update = !TypeEntries::is_type_unknown(profiled_k);
2829 // known not to be null or null bit already set and already set to
2830 // unknown: nothing we can do to improve profiling
2831 if (!do_null && !do_update) {
2832 return result;
2833 }
2835 ciKlass* exact_klass = NULL;
2836 Compilation* comp = Compilation::current();
2837 if (do_update) {
2838 // try to find exact type, using CHA if possible, so that loading
2839 // the klass from the object can be avoided
2840 ciType* type = obj->exact_type();
2841 if (type == NULL) {
2842 type = obj->declared_type();
2843 type = comp->cha_exact_type(type);
2844 }
2845 assert(type == NULL || type->is_klass(), "type should be class");
2846 exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
2848 do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2849 }
2851 if (!do_null && !do_update) {
2852 return result;
2853 }
2855 ciKlass* exact_signature_k = NULL;
2856 if (do_update) {
2857 // Is the type from the signature exact (the only one possible)?
2858 exact_signature_k = signature_at_call_k->exact_klass();
2859 if (exact_signature_k == NULL) {
2860 exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2861 } else {
2862 result = exact_signature_k;
2863 // Known statically. No need to emit any code: prevent
2864 // LIR_Assembler::emit_profile_type() from emitting useless code
2865 profiled_k = ciTypeEntries::with_status(result, profiled_k);
2866 }
2867 // exact_klass and exact_signature_k can both be non-NULL but
2868 // different if exact_klass is loaded after the ciObject for
2869 // exact_signature_k is created.
2870 if (exact_klass == NULL && exact_signature_k != NULL && exact_klass != exact_signature_k) {
2871 // sometimes the type of the signature is better than the best type
2872 // the compiler has
2873 exact_klass = exact_signature_k;
2874 }
2875 if (callee_signature_k != NULL &&
2876 callee_signature_k != signature_at_call_k) {
2877 ciKlass* improved_klass = callee_signature_k->exact_klass();
2878 if (improved_klass == NULL) {
2879 improved_klass = comp->cha_exact_type(callee_signature_k);
2880 }
2881 if (exact_klass == NULL && improved_klass != NULL && exact_klass != improved_klass) {
2882 exact_klass = exact_signature_k;
2883 }
2884 }
2885 do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2886 }
2888 if (!do_null && !do_update) {
2889 return result;
2890 }
2892 if (mdp == LIR_OprFact::illegalOpr) {
2893 mdp = new_register(T_METADATA);
2894 __ metadata2reg(md->constant_encoding(), mdp);
2895 if (md_base_offset != 0) {
2896 LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
2897 mdp = new_pointer_register();
2898 __ leal(LIR_OprFact::address(base_type_address), mdp);
2899 }
2900 }
2901 LIRItem value(obj, this);
2902 value.load_item();
2903 __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
2904 value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
2905 return result;
2906 }
2908 // profile parameters on entry to the root of the compilation
2909 void LIRGenerator::profile_parameters(Base* x) {
2910 if (compilation()->profile_parameters()) {
2911 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2912 ciMethodData* md = scope()->method()->method_data_or_null();
2913 assert(md != NULL, "Sanity");
2915 if (md->parameters_type_data() != NULL) {
2916 ciParametersTypeData* parameters_type_data = md->parameters_type_data();
2917 ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
2918 LIR_Opr mdp = LIR_OprFact::illegalOpr;
2919 for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
2920 LIR_Opr src = args->at(i);
2921 assert(!src->is_illegal(), "check");
2922 BasicType t = src->type();
2923 if (t == T_OBJECT || t == T_ARRAY) {
2924 intptr_t profiled_k = parameters->type(j);
2925 Local* local = x->state()->local_at(java_index)->as_Local();
2926 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2927 in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2928 profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL);
2929 // If the profile is known statically set it once for all and do not emit any code
2930 if (exact != NULL) {
2931 md->set_parameter_type(j, exact);
2932 }
2933 j++;
2934 }
2935 java_index += type2size[t];
2936 }
2937 }
2938 }
2939 }
2941 void LIRGenerator::do_Base(Base* x) {
2942 __ std_entry(LIR_OprFact::illegalOpr);
2943 // Emit moves from physical registers / stack slots to virtual registers
2944 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2945 IRScope* irScope = compilation()->hir()->top_scope();
2946 int java_index = 0;
2947 for (int i = 0; i < args->length(); i++) {
2948 LIR_Opr src = args->at(i);
2949 assert(!src->is_illegal(), "check");
2950 BasicType t = src->type();
2952 // Types which are smaller than int are passed as int, so
2953 // correct the type that was passed.
2954 switch (t) {
2955 case T_BYTE:
2956 case T_BOOLEAN:
2957 case T_SHORT:
2958 case T_CHAR:
2959 t = T_INT;
2960 break;
2961 }
2963 LIR_Opr dest = new_register(t);
2964 __ move(src, dest);
2966 // Assign new location to Local instruction for this local
2967 Local* local = x->state()->local_at(java_index)->as_Local();
2968 assert(local != NULL, "Locals for incoming arguments must have been created");
2969 #ifndef __SOFTFP__
2970 // The java calling convention passes double as long and float as int.
2971 assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2972 #endif // __SOFTFP__
2973 local->set_operand(dest);
2974 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
2975 java_index += type2size[t];
2976 }
2978 if (compilation()->env()->dtrace_method_probes()) {
2979 BasicTypeList signature;
2980 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
2981 signature.append(T_METADATA); // Method*
2982 LIR_OprList* args = new LIR_OprList();
2983 args->append(getThreadPointer());
2984 LIR_Opr meth = new_register(T_METADATA);
2985 __ metadata2reg(method()->constant_encoding(), meth);
2986 args->append(meth);
2987 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2988 }
2990 if (method()->is_synchronized()) {
2991 LIR_Opr obj;
2992 if (method()->is_static()) {
2993 obj = new_register(T_OBJECT);
2994 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2995 } else {
2996 Local* receiver = x->state()->local_at(0)->as_Local();
2997 assert(receiver != NULL, "must already exist");
2998 obj = receiver->operand();
2999 }
3000 assert(obj->is_valid(), "must be valid");
3002 if (method()->is_synchronized() && GenerateSynchronizationCode) {
3003 LIR_Opr lock = new_register(T_INT);
3004 __ load_stack_address_monitor(0, lock);
3006 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
3007 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
3009 // receiver is guaranteed non-NULL so don't need CodeEmitInfo
3010 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
3011 }
3012 }
3014 // increment invocation counters if needed
3015 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
3016 profile_parameters(x);
3017 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
3018 increment_invocation_counter(info);
3019 }
3021 // all blocks with a successor must end with an unconditional jump
3022 // to the successor even if they are consecutive
3023 __ jump(x->default_sux());
3024 }
3027 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
3028 // construct our frame and model the production of incoming pointer
3029 // to the OSR buffer.
3030 __ osr_entry(LIR_Assembler::osrBufferPointer());
3031 LIR_Opr result = rlock_result(x);
3032 __ move(LIR_Assembler::osrBufferPointer(), result);
3033 }
3036 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
3037 assert(args->length() == arg_list->length(),
3038 err_msg_res("args=%d, arg_list=%d", args->length(), arg_list->length()));
3039 for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
3040 LIRItem* param = args->at(i);
3041 LIR_Opr loc = arg_list->at(i);
3042 if (loc->is_register()) {
3043 param->load_item_force(loc);
3044 } else {
3045 LIR_Address* addr = loc->as_address_ptr();
3046 param->load_for_store(addr->type());
3047 if (addr->type() == T_OBJECT) {
3048 __ move_wide(param->result(), addr);
3049 } else
3050 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3051 __ unaligned_move(param->result(), addr);
3052 } else {
3053 __ move(param->result(), addr);
3054 }
3055 }
3056 }
3058 if (x->has_receiver()) {
3059 LIRItem* receiver = args->at(0);
3060 LIR_Opr loc = arg_list->at(0);
3061 if (loc->is_register()) {
3062 receiver->load_item_force(loc);
3063 } else {
3064 assert(loc->is_address(), "just checking");
3065 receiver->load_for_store(T_OBJECT);
3066 __ move_wide(receiver->result(), loc->as_address_ptr());
3067 }
3068 }
3069 }
3072 // Visits all arguments, returns appropriate items without loading them
3073 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
3074 LIRItemList* argument_items = new LIRItemList();
3075 if (x->has_receiver()) {
3076 LIRItem* receiver = new LIRItem(x->receiver(), this);
3077 argument_items->append(receiver);
3078 }
3079 for (int i = 0; i < x->number_of_arguments(); i++) {
3080 LIRItem* param = new LIRItem(x->argument_at(i), this);
3081 argument_items->append(param);
3082 }
3083 return argument_items;
3084 }
3087 // An invoke with a receiver has the following phases:
3088 // a) traverse and load/lock receiver;
3089 // b) traverse all arguments -> item-array (invoke_visit_argument)
3090 // c) push receiver on stack
3091 // d) load each of the items and push on stack
3092 // e) unlock receiver
3093 // f) move receiver into receiver-register %o0
3094 // g) lock result registers and emit call operation
3095 //
3096 // Before issuing a call, we must spill-save all values on the stack
3097 // that are in caller-save registers. "spill-save" moves those registers
3098 // either into a free callee-save register or spills them if no free
3099 // callee-save register is available.
3100 //
3101 // The problem is where to invoke spill-save.
3102 // - if invoked between e) and f), "spill-save" may lock a callee-save
3103 // register and destroy the receiver register
3104 // before f) is executed
3105 // - if we rearrange f) to be earlier, by loading %o0 sooner, it
3106 // may destroy a value on the stack that is currently in %o0
3107 // and is waiting to be spilled
3108 // - if we keep the receiver locked while doing spill-save,
3109 // we cannot spill it as it is spill-locked
3110 //
3111 void LIRGenerator::do_Invoke(Invoke* x) {
3112 CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
3114 LIR_OprList* arg_list = cc->args();
3115 LIRItemList* args = invoke_visit_arguments(x);
3116 LIR_Opr receiver = LIR_OprFact::illegalOpr;
3118 // setup result register
3119 LIR_Opr result_register = LIR_OprFact::illegalOpr;
3120 if (x->type() != voidType) {
3121 result_register = result_register_for(x->type());
3122 }
3124 CodeEmitInfo* info = state_for(x, x->state());
3126 invoke_load_arguments(x, args, arg_list);
3128 if (x->has_receiver()) {
3129 args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
3130 receiver = args->at(0)->result();
3131 }
3133 // emit invoke code
3134 bool optimized = x->target_is_loaded() && x->target_is_final();
3135 assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
3137 // JSR 292
3138 // Preserve the SP over MethodHandle call sites.
3139 ciMethod* target = x->target();
3140 bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
3141 target->is_method_handle_intrinsic() ||
3142 target->is_compiled_lambda_form());
3143 if (is_method_handle_invoke) {
3144 info->set_is_method_handle_invoke(true);
3145 __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
3146 }
3148 switch (x->code()) {
3149 case Bytecodes::_invokestatic:
3150 __ call_static(target, result_register,
3151 SharedRuntime::get_resolve_static_call_stub(),
3152 arg_list, info);
3153 break;
3154 case Bytecodes::_invokespecial:
3155 case Bytecodes::_invokevirtual:
3156 case Bytecodes::_invokeinterface:
3157 // for final target we still produce an inline cache, in order
3158 // to be able to call mixed mode
3159 if (x->code() == Bytecodes::_invokespecial || optimized) {
3160 __ call_opt_virtual(target, receiver, result_register,
3161 SharedRuntime::get_resolve_opt_virtual_call_stub(),
3162 arg_list, info);
3163 } else if (x->vtable_index() < 0) {
3164 __ call_icvirtual(target, receiver, result_register,
3165 SharedRuntime::get_resolve_virtual_call_stub(),
3166 arg_list, info);
3167 } else {
3168 int entry_offset = InstanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
3169 int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
3170 __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
3171 }
3172 break;
3173 case Bytecodes::_invokedynamic: {
3174 __ call_dynamic(target, receiver, result_register,
3175 SharedRuntime::get_resolve_static_call_stub(),
3176 arg_list, info);
3177 break;
3178 }
3179 default:
3180 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
3181 break;
3182 }
3184 // JSR 292
3185 // Restore the SP after MethodHandle call sites.
3186 if (is_method_handle_invoke) {
3187 __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
3188 }
3190 if (x->type()->is_float() || x->type()->is_double()) {
3191 // Force rounding of results from non-strictfp when in strictfp
3192 // scope (or when we don't know the strictness of the callee, to
3193 // be safe.)
3194 if (method()->is_strict()) {
3195 if (!x->target_is_loaded() || !x->target_is_strictfp()) {
3196 result_register = round_item(result_register);
3197 }
3198 }
3199 }
3201 if (result_register->is_valid()) {
3202 LIR_Opr result = rlock_result(x);
3203 __ move(result_register, result);
3204 }
3205 }
3208 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
3209 assert(x->number_of_arguments() == 1, "wrong type");
3210 LIRItem value (x->argument_at(0), this);
3211 LIR_Opr reg = rlock_result(x);
3212 value.load_item();
3213 LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
3214 __ move(tmp, reg);
3215 }
3219 // Code for: x->x() {x->cond()} x->y() ? x->tval() : x->fval()
3220 void LIRGenerator::do_IfOp(IfOp* x) {
3221 #ifdef ASSERT
3222 {
3223 ValueTag xtag = x->x()->type()->tag();
3224 ValueTag ttag = x->tval()->type()->tag();
3225 assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3226 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3227 assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3228 }
3229 #endif
3231 LIRItem left(x->x(), this);
3232 LIRItem right(x->y(), this);
3233 left.load_item();
3234 if (can_inline_as_constant(right.value())) {
3235 right.dont_load_item();
3236 } else {
3237 right.load_item();
3238 }
3240 LIRItem t_val(x->tval(), this);
3241 LIRItem f_val(x->fval(), this);
3242 t_val.dont_load_item();
3243 f_val.dont_load_item();
3244 LIR_Opr reg = rlock_result(x);
3245 #ifndef MIPS64
3246 __ cmp(lir_cond(x->cond()), left.result(), right.result());
3247 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3248 #else
3249 LIR_Opr opr1 = t_val.result();
3250 LIR_Opr opr2 = f_val.result();
3251 LabelObj* skip = new LabelObj();
3252 __ move(opr1, reg);
3253 __ branch(lir_cond(x->cond()), left.result(), right.result(), skip->label());
3254 __ move(opr2, reg);
3255 __ branch_destination(skip->label());
3256 #endif
3257 }
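// The MIPS path above exists because MIPS has neither condition codes nor
// a conditional move, so "reg = cond ? tval : fval" is lowered to a
// compare-and-branch diamond:
//
//   reg := tval
//   if (cond(left, right)) goto skip   // condition holds: keep tval
//   reg := fval
//   skip: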
3259 void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
3260 assert(x->number_of_arguments() == expected_arguments, "wrong type");
3261 LIR_Opr reg = result_register_for(x->type());
3262 __ call_runtime_leaf(routine, getThreadTemp(),
3263 reg, new LIR_OprList());
3264 LIR_Opr result = rlock_result(x);
3265 __ move(reg, result);
3266 }
3268 #ifdef TRACE_HAVE_INTRINSICS
3269 void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
3270 LIR_Opr thread = getThreadPointer();
3271 LIR_Opr osthread = new_pointer_register();
3272 __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
3273 size_t thread_id_size = OSThread::thread_id_size();
3274 if (thread_id_size == (size_t) BytesPerLong) {
3275 LIR_Opr id = new_register(T_LONG);
3276 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id);
3277 __ convert(Bytecodes::_l2i, id, rlock_result(x));
3278 } else if (thread_id_size == (size_t) BytesPerInt) {
3279 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x));
3280 } else {
3281 ShouldNotReachHere();
3282 }
3283 }
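// The OS-level thread id is two loads away: JavaThread -> OSThread ->
// id field (offsets symbolic):
//
//   osthread := [thread + osthread_offset]
//   id       := [osthread + thread_id_offset]
//
// An 8-byte id is narrowed with l2i to the intrinsic's int result; a
// 4-byte id is moved directly.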
3285 void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
3286 CodeEmitInfo* info = state_for(x);
3287 CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
3288 BasicType klass_pointer_type = NOT_LP64(T_INT) LP64_ONLY(T_LONG);
3289 assert(info != NULL, "must have info");
3290 LIRItem arg(x->argument_at(1), this);
3291 arg.load_item();
3292 LIR_Opr klass = new_pointer_register();
3293 __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), klass_pointer_type), klass, info);
3294 LIR_Opr id = new_register(T_LONG);
3295 ByteSize offset = TRACE_ID_OFFSET;
3296 LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
3297 __ move(trace_id_addr, id);
3298 __ logical_or(id, LIR_OprFact::longConst(0x01L), id);
3299 __ store(id, trace_id_addr);
3300 __ logical_and(id, LIR_OprFact::longConst(~0x3L), id);
3301 __ move(id, rlock_result(x));
3302 }
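// The trace id word is updated read-modify-write; the exact bit meanings
// belong to the tracing framework, but the shape is:
//
//   id := [klass + TRACE_ID_OFFSET]
//   id |= 0x1; store back   // presumably marks the klass as in use
//   id &= ~0x3              // strip the low tag bits from the result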
3303 #endif
3305 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
3306 switch (x->id()) {
3307 case vmIntrinsics::_intBitsToFloat :
3308 case vmIntrinsics::_doubleToRawLongBits :
3309 case vmIntrinsics::_longBitsToDouble :
3310 case vmIntrinsics::_floatToRawIntBits : {
3311 do_FPIntrinsics(x);
3312 break;
3313 }
3315 #ifdef TRACE_HAVE_INTRINSICS
3316 case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break;
3317 case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break;
3318 case vmIntrinsics::_counterTime:
3319 do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), 0, x);
3320 break;
3321 #endif
3323 case vmIntrinsics::_currentTimeMillis:
3324 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), 0, x);
3325 break;
3327 case vmIntrinsics::_nanoTime:
3328 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), 0, x);
3329 break;
3331 case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break;
3332 case vmIntrinsics::_isInstance: do_isInstance(x); break;
3333 case vmIntrinsics::_getClass: do_getClass(x); break;
3334 case vmIntrinsics::_currentThread: do_currentThread(x); break;
3336 case vmIntrinsics::_dlog: // fall through
3337 case vmIntrinsics::_dlog10: // fall through
3338 case vmIntrinsics::_dabs: // fall through
3339 case vmIntrinsics::_dsqrt: // fall through
3340 case vmIntrinsics::_dtan: // fall through
3341 case vmIntrinsics::_dsin : // fall through
3342 case vmIntrinsics::_dcos : // fall through
3343 case vmIntrinsics::_dexp : // fall through
3344 case vmIntrinsics::_dpow : do_MathIntrinsic(x); break;
3345 case vmIntrinsics::_arraycopy: do_ArrayCopy(x); break;
3347 // java.nio.Buffer.checkIndex
3348 case vmIntrinsics::_checkIndex: do_NIOCheckIndex(x); break;
3350 case vmIntrinsics::_compareAndSwapObject:
3351 do_CompareAndSwap(x, objectType);
3352 break;
3353 case vmIntrinsics::_compareAndSwapInt:
3354 do_CompareAndSwap(x, intType);
3355 break;
3356 case vmIntrinsics::_compareAndSwapLong:
3357 do_CompareAndSwap(x, longType);
3358 break;
3360 case vmIntrinsics::_loadFence :
3361 if (os::is_MP()) __ membar_acquire();
3362 break;
3363 case vmIntrinsics::_storeFence:
3364 if (os::is_MP()) __ membar_release();
3365 break;
3366 case vmIntrinsics::_fullFence :
3367 if (os::is_MP()) __ membar();
3368 break;
3370 case vmIntrinsics::_Reference_get:
3371 do_Reference_get(x);
3372 break;
3374 case vmIntrinsics::_updateCRC32:
3375 case vmIntrinsics::_updateBytesCRC32:
3376 case vmIntrinsics::_updateByteBufferCRC32:
3377 do_update_CRC32(x);
3378 break;
3380 default: ShouldNotReachHere(); break;
3381 }
3382 }
3384 void LIRGenerator::profile_arguments(ProfileCall* x) {
3385 if (compilation()->profile_arguments()) {
3386 int bci = x->bci_of_invoke();
3387 ciMethodData* md = x->method()->method_data_or_null();
3388 ciProfileData* data = md->bci_to_data(bci);
3389 if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
3390 (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
3391 ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
3392 int base_offset = md->byte_offset_of_slot(data, extra);
3393 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3394 ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
3396 Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3397 int start = 0;
3398 int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
3399 if (x->inlined() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
3400 // first argument is not profiled at call (method handle invoke)
3401 assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
3402 start = 1;
3403 }
3404 ciSignature* callee_signature = x->callee()->signature();
3405 // method handle call to virtual method
3406 bool has_receiver = x->inlined() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
3407 ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL);
3409 bool ignored_will_link;
3410 ciSignature* signature_at_call = NULL;
3411 x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3412 ciSignatureStream signature_at_call_stream(signature_at_call);
3414 // if called through method handle invoke, some arguments may have been popped
3415 for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
3416 int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
3417 ciKlass* exact = profile_type(md, base_offset, off,
3418 args->type(i), x->profiled_arg_at(i+start), mdp,
3419 !x->arg_needs_null_check(i+start),
3420 signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
3421 if (exact != NULL) {
3422 md->set_argument_type(bci, i, exact);
3423 }
3424 }
3425 } else {
3426 #ifdef ASSERT
3427 Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
3428 int n = x->nb_profiled_args();
3429 assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
3430 (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
3431 "only at JSR292 bytecodes");
3432 #endif
3433 }
3434 }
3435 }
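// Loop bounds above, in brief: 'stop' is the number of profiled slots in
// the MDO entry, and 'start' skips a receiver already consumed by a
// method handle invoke; roughly:
//
//   for (i = 0; i < stop && i + start < nb_profiled_args; i++)
//     profile MDO slot i against profiled_arg_at(i + start)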
3437 // profile parameters on entry to an inlined method
3438 void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
3439 if (compilation()->profile_parameters() && x->inlined()) {
3440 ciMethodData* md = x->callee()->method_data_or_null();
3441 if (md != NULL) {
3442 ciParametersTypeData* parameters_type_data = md->parameters_type_data();
3443 if (parameters_type_data != NULL) {
3444 ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
3445 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3446 bool has_receiver = !x->callee()->is_static();
3447 ciSignature* sig = x->callee()->signature();
3448 ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
3449 int i = 0; // to iterate on the Instructions
3450 Value arg = x->recv();
3451 bool not_null = false;
3452 int bci = x->bci_of_invoke();
3453 Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3454 // The first parameter is the receiver, so that's what we start
3455 // with if it exists. One exception is a method handle call to a
3456 // virtual method: there the receiver is in the args list
3457 if (arg == NULL || !Bytecodes::has_receiver(bc)) {
3458 i = 1;
3459 arg = x->profiled_arg_at(0);
3460 not_null = !x->arg_needs_null_check(0);
3461 }
3462 int k = 0; // to iterate on the profile data
3463 for (;;) {
3464 intptr_t profiled_k = parameters->type(k);
3465 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
3466 in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
3467 profiled_k, arg, mdp, not_null, sig_stream.next_klass(), NULL);
3468 // If the profiled type is known statically, set it once and for all and do not emit any code
3469 if (exact != NULL) {
3470 md->set_parameter_type(k, exact);
3471 }
3472 k++;
3473 if (k >= parameters_type_data->number_of_parameters()) {
3474 #ifdef ASSERT
3475 int extra = 0;
3476 if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
3477 x->nb_profiled_args() >= TypeProfileParmsLimit &&
3478 x->recv() != NULL && Bytecodes::has_receiver(bc)) {
3479 extra += 1;
3480 }
3481 assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
3482 #endif
3483 break;
3484 }
3485 arg = x->profiled_arg_at(i);
3486 not_null = !x->arg_needs_null_check(i);
3487 i++;
3488 }
3489 }
3490 }
3491 }
3492 }
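// Two cursors run in parallel above: 'i' walks the profiled Instruction
// arguments (starting with the receiver when present) and 'k' walks the
// ParametersTypeData rows; iteration stops when the MDO runs out of rows:
//
//   arg = receiver or profiled_arg_at(0); k = 0;
//   do { profile(row k, arg); k++; arg = profiled_arg_at(i++); }
//   while (k < number_of_parameters)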
3494 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
3495 // Load recv into a temporary register so that it properly interferes with the other temporaries
3496 LIR_Opr recv = LIR_OprFact::illegalOpr;
3497 LIR_Opr mdo = new_register(T_OBJECT);
3498 // tmp is used to hold the counters on SPARC
3499 LIR_Opr tmp = new_pointer_register();
3501 if (x->nb_profiled_args() > 0) {
3502 profile_arguments(x);
3503 }
3505 // profile parameters on inlined method entry including receiver
3506 if (x->recv() != NULL || x->nb_profiled_args() > 0) {
3507 profile_parameters_at_call(x);
3508 }
3510 if (x->recv() != NULL) {
3511 LIRItem value(x->recv(), this);
3512 value.load_item();
3513 recv = new_register(T_OBJECT);
3514 __ move(value.result(), recv);
3515 }
3516 __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3517 }
3519 void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3520 int bci = x->bci_of_invoke();
3521 ciMethodData* md = x->method()->method_data_or_null();
3522 ciProfileData* data = md->bci_to_data(bci);
3523 assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3524 ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3525 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3527 bool ignored_will_link;
3528 ciSignature* signature_at_call = NULL;
3529 x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3531 // The offset within the MDO of the entry to update may be too large
3532 // to be used in load/store instructions on some platforms. So have
3533 // profile_type() compute the address of the profile in a register.
3534 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3535 ret->type(), x->ret(), mdp,
3536 !x->needs_null_check(),
3537 signature_at_call->return_type()->as_klass(),
3538 x->callee()->signature()->return_type()->as_klass());
3539 if (exact != NULL) {
3540 md->set_return_type(bci, exact);
3541 }
3542 }
3544 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3545 // We can safely ignore accessors here, since c2 will inline them anyway;
3546 // accessors are also always mature.
3547 if (!x->inlinee()->is_accessor()) {
3548 CodeEmitInfo* info = state_for(x, x->state(), true);
3549 // Notify the runtime only very infrequently, just to take care of counter overflows
3550 increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true);
3551 }
3552 }
3554 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
3555 int freq_log;
3556 int level = compilation()->env()->comp_level();
3557 if (level == CompLevel_limited_profile) {
3558 freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3559 } else if (level == CompLevel_full_profile) {
3560 freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3561 } else {
3562 ShouldNotReachHere();
3563 }
3564 // Increment the appropriate invocation/backedge counter and notify the runtime.
3565 increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
3566 }
3568 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3569 ciMethod *method, int frequency,
3570 int bci, bool backedge, bool notify) {
3571 assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^n - 1 or 0");
3572 int level = _compilation->env()->comp_level();
3573 assert(level > CompLevel_simple, "Shouldn't be here");
3575 int offset = -1;
3576 LIR_Opr counter_holder;
3577 if (level == CompLevel_limited_profile) {
3578 MethodCounters* counters_adr = method->ensure_method_counters();
3579 if (counters_adr == NULL) {
3580 bailout("method counters allocation failed");
3581 return;
3582 }
3583 counter_holder = new_pointer_register();
3584 __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
3585 offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
3586 MethodCounters::invocation_counter_offset());
3587 } else if (level == CompLevel_full_profile) {
3588 counter_holder = new_register(T_METADATA);
3589 offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
3590 MethodData::invocation_counter_offset());
3591 ciMethodData* md = method->method_data_or_null();
3592 assert(md != NULL, "Sanity");
3593 __ metadata2reg(md->constant_encoding(), counter_holder);
3594 } else {
3595 ShouldNotReachHere();
3596 }
3597 LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3598 LIR_Opr result = new_register(T_INT);
3599 __ load(counter, result);
3600 __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
3601 __ store(result, counter);
3602 if (notify) {
3603 LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
3604 LIR_Opr meth = new_register(T_METADATA);
3605 __ metadata2reg(method->constant_encoding(), meth);
3606 __ logical_and(result, mask, result);
3607 #ifndef MIPS64
3608 __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3609 #endif
3610 // The bci for info can point to the cmp; for ifs we want the bci of the if itself
3611 CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3612 #ifndef MIPS64
3613 __ branch(lir_cond_equal, T_INT, overflow);
3614 #else
3615 __ branch(lir_cond_equal, result, LIR_OprFact::intConst(0), T_INT, overflow);
3616 #endif
3617 __ branch_destination(overflow->continuation());
3618 }
3619 }
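// The notify test is a mask trick: 'frequency' is 2^freq_log - 1, so
// after the logical_and, "(counter & mask) == 0" holds once every
// 2^freq_log increments, and only then is the CounterOverflowStub run.
// With freq_log == 10, for example (values illustrative):
//
//   counter += count_increment
//   if ((counter & (1023 << count_shift)) == 0) goto overflow_stub
//   continuation: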
3621 void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3622 LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3623 BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3625 if (x->pass_thread()) {
3626 signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
3627 args->append(getThreadPointer());
3628 }
3630 for (int i = 0; i < x->number_of_arguments(); i++) {
3631 Value a = x->argument_at(i);
3632 LIRItem* item = new LIRItem(a, this);
3633 item->load_item();
3634 args->append(item->result());
3635 signature->append(as_BasicType(a->type()));
3636 }
3638 LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
3639 if (x->type() == voidType) {
3640 set_no_result(x);
3641 } else {
3642 __ move(result, rlock_result(x));
3643 }
3644 }
3646 #ifdef ASSERT
3647 void LIRGenerator::do_Assert(Assert *x) {
3648 ValueTag tag = x->x()->type()->tag();
3649 If::Condition cond = x->cond();
3651 LIRItem xitem(x->x(), this);
3652 LIRItem yitem(x->y(), this);
3653 LIRItem* xin = &xitem;
3654 LIRItem* yin = &yitem;
3656 assert(tag == intTag, "Only integer assertions are valid!");
3658 xin->load_item();
3659 yin->dont_load_item();
3661 set_no_result(x);
3663 LIR_Opr left = xin->result();
3664 LIR_Opr right = yin->result();
3666 __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
3667 }
3668 #endif
3670 void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
3673 Instruction *a = x->x();
3674 Instruction *b = x->y();
3675 if (!a || StressRangeCheckElimination) {
3676 assert(!b || StressRangeCheckElimination, "b must also be null");
3678 CodeEmitInfo *info = state_for(x, x->state());
3679 CodeStub* stub = new PredicateFailedStub(info);
3681 __ jump(stub);
3682 } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
3683 int a_int = a->type()->as_IntConstant()->value();
3684 int b_int = b->type()->as_IntConstant()->value();
3686 bool ok = false;
3688 switch(x->cond()) {
3689 case Instruction::eql: ok = (a_int == b_int); break;
3690 case Instruction::neq: ok = (a_int != b_int); break;
3691 case Instruction::lss: ok = (a_int < b_int); break;
3692 case Instruction::leq: ok = (a_int <= b_int); break;
3693 case Instruction::gtr: ok = (a_int > b_int); break;
3694 case Instruction::geq: ok = (a_int >= b_int); break;
3695 case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
3696 case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
3697 default: ShouldNotReachHere();
3698 }
3700 if (ok) {
3702 CodeEmitInfo *info = state_for(x, x->state());
3703 CodeStub* stub = new PredicateFailedStub(info);
3705 __ jump(stub);
3706 }
3707 } else {
3709 ValueTag tag = x->x()->type()->tag();
3710 If::Condition cond = x->cond();
3711 LIRItem xitem(x->x(), this);
3712 LIRItem yitem(x->y(), this);
3713 LIRItem* xin = &xitem;
3714 LIRItem* yin = &yitem;
3716 assert(tag == intTag, "Only integer deoptimizations are valid!");
3718 xin->load_item();
3719 yin->dont_load_item();
3720 set_no_result(x);
3722 LIR_Opr left = xin->result();
3723 LIR_Opr right = yin->result();
3725 CodeEmitInfo *info = state_for(x, x->state());
3726 CodeStub* stub = new PredicateFailedStub(info);
3728 #ifndef MIPS64
3729 __ cmp(lir_cond(cond), left, right);
3730 __ branch(lir_cond(cond), right->type(), stub);
3731 #else
3732 tty->print_cr("LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) not implemented yet!");
3733 Unimplemented();
3734 #endif
3735 }
3736 }
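// Three cases above: a null 'a' (or stress mode) means the predicate
// always fails, so we jump straight to the deopt stub; two int constants
// are folded at compile time, emitting the jump only if the condition
// already holds (aeq/beq give the unsigned compares used by range checks);
// everything else becomes a compare-and-branch, still unimplemented in
// this MIPS64 port:
//
//   a == NULL           -> jump(stub)
//   a, b both constants -> jump(stub) iff cond(a_int, b_int)
//   otherwise           -> cmp(cond); branch(cond, stub)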
3739 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
3740 LIRItemList args(1);
3741 LIRItem value(arg1, this);
3742 args.append(&value);
3743 BasicTypeList signature;
3744 signature.append(as_BasicType(arg1->type()));
3746 return call_runtime(&signature, &args, entry, result_type, info);
3747 }
3750 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
3751 LIRItemList args(2);
3752 LIRItem value1(arg1, this);
3753 LIRItem value2(arg2, this);
3754 args.append(&value1);
3755 args.append(&value2);
3756 BasicTypeList signature;
3757 signature.append(as_BasicType(arg1->type()));
3758 signature.append(as_BasicType(arg2->type()));
3760 return call_runtime(&signature, &args, entry, result_type, info);
3761 }
3764 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
3765 address entry, ValueType* result_type, CodeEmitInfo* info) {
3766 // get a result register
3767 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3768 LIR_Opr result = LIR_OprFact::illegalOpr;
3769 if (result_type->tag() != voidTag) {
3770 result = new_register(result_type);
3771 phys_reg = result_register_for(result_type);
3772 }
3774 // move the arguments into the correct location
3775 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3776 assert(cc->length() == args->length(), "argument mismatch");
3777 for (int i = 0; i < args->length(); i++) {
3778 LIR_Opr arg = args->at(i);
3779 LIR_Opr loc = cc->at(i);
3780 if (loc->is_register()) {
3781 __ move(arg, loc);
3782 } else {
3783 LIR_Address* addr = loc->as_address_ptr();
3784 // if (!can_store_as_constant(arg)) {
3785 // LIR_Opr tmp = new_register(arg->type());
3786 // __ move(arg, tmp);
3787 // arg = tmp;
3788 // }
3789 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3790 __ unaligned_move(arg, addr);
3791 } else {
3792 __ move(arg, addr);
3793 }
3794 }
3795 }
3797 if (info) {
3798 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3799 } else {
3800 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3801 }
3802 if (result->is_valid()) {
3803 __ move(phys_reg, result);
3804 }
3805 return result;
3806 }
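// Argument shuffling follows the frame map's C calling convention:
// register locations get a plain move, stack locations a store, with
// unaligned_move for longs/doubles since outgoing slots may lack natural
// alignment. For an (int, long) signature where only the int fits in a
// register, roughly (ABI illustrative, not any particular port):
//
//   move           a0 -> arg_reg0  // int travels in a register
//   unaligned_move a1 -> [sp + 0]  // long spills to the outgoing area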
3809 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
3810 address entry, ValueType* result_type, CodeEmitInfo* info) {
3811 // get a result register
3812 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3813 LIR_Opr result = LIR_OprFact::illegalOpr;
3814 if (result_type->tag() != voidTag) {
3815 result = new_register(result_type);
3816 phys_reg = result_register_for(result_type);
3817 }
3819 // move the arguments into the correct location
3820 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3822 assert(cc->length() == args->length(), "argument mismatch");
3823 for (int i = 0; i < args->length(); i++) {
3824 LIRItem* arg = args->at(i);
3825 LIR_Opr loc = cc->at(i);
3826 if (loc->is_register()) {
3827 arg->load_item_force(loc);
3828 } else {
3829 LIR_Address* addr = loc->as_address_ptr();
3830 arg->load_for_store(addr->type());
3831 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3832 __ unaligned_move(arg->result(), addr);
3833 } else {
3834 __ move(arg->result(), addr);
3835 }
3836 }
3837 }
3839 if (info) {
3840 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3841 } else {
3842 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3843 }
3844 if (result->is_valid()) {
3845 __ move(phys_reg, result);
3846 }
3847 return result;
3848 }
3850 void LIRGenerator::do_MemBar(MemBar* x) {
3851 if (os::is_MP()) {
3852 LIR_Code code = x->code();
3853 switch(code) {
3854 case lir_membar_acquire : __ membar_acquire(); break;
3855 case lir_membar_release : __ membar_release(); break;
3856 case lir_membar : __ membar(); break;
3857 case lir_membar_loadload : __ membar_loadload(); break;
3858 case lir_membar_storestore: __ membar_storestore(); break;
3859 case lir_membar_loadstore : __ membar_loadstore(); break;
3860 case lir_membar_storeload : __ membar_storeload(); break;
3861 default : ShouldNotReachHere(); break;
3862 }
3863 }
3864 }
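// Barriers are emitted only on multiprocessors; each MemBar node maps
// one-to-one onto the LIR barrier of the same flavor, and the back end
// decides what that costs (e.g. typically a full sync on MIPS, while on
// x86 only storeload needs a real fence). For instance:
//
//   lir_membar_acquire   -> membar_acquire()    // loads before later ops
//   lir_membar_storeload -> membar_storeload()  // store -> load ordering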