Thu, 21 Mar 2013 09:27:54 +0100
7153771: array bound check elimination for c1
Summary: when possible optimize out array bound checks, inserting predicates when needed.
Reviewed-by: never, kvn, twisti
Contributed-by: thomaswue <thomas.wuerthinger@oracle.com>
/*
 * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
// TODO: ARM - Use some recognizable constant which still fits architectural constraints
#ifdef ARM
#define PATCHED_ADDR  (204)
#else
#define PATCHED_ADDR  (max_jint)
#endif
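// PATCHED_ADDR is the placeholder displacement used for field accesses whose
// offset is not yet known; the patching code rewrites it once the field is
// resolved. ARM needs a small value that still encodes into a load/store.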
void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}

//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
//
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3
//
PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}

void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}

// Traverse assignment graph in depth first order and generate moves in post order
// i.e. two assignments: b := c, a := b start with node c:
// Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
// Generates moves in this order: move b to a and move c to b
// i.e. cycle a := b, b := a starts with node a
// Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
// Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}

PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}

ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}

void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}

//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}

void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}

//--------------------------------------------------------------

void LIRGenerator::init() {
  _bs = Universe::heap()->barrier_set();
}

void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}

void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}

void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}

//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be a root
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}

// This is called for each node in the tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when we encounter a root
  if ((instr->is_pinned() && instr->as_Phi() == NULL) || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}

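// Builds the debug-information state for instruction x: every live value on
// the expression stack and in the locals must have been evaluated to an
// operand before debug info can be emitted, and dead locals are NULLed out
// so that linear scan may assume every remaining local is live.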
CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != NULL, "state must be defined");

#ifndef PRODUCT
  state->verify();
#endif

  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
}

CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}

void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info) {
  if (!obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ klass2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ metadata2reg(obj->constant_encoding(), r);
  }
}

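// A single unsigned compare against the array length tests both bounds at
// once: a negative index, viewed as unsigned, is larger than any valid array
// length, so the aboveEqual/belowEqual branches below also catch index < 0.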
void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
}

void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
  __ move(index, result);
}

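// On two-operand architectures (notably x86) most LIR arithmetic requires
// result == left input, so when TwoOperandLIRForm is set the left operand is
// first materialized into the result register and the operation then runs
// in place on it.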
void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          int c = right->as_jint();
          if (is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        if (is_strictfp) {
          __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div (left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}

void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}

void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
  case Bytecodes::_ishl:
  case Bytecodes::_lshl:  __ shift_left(value, count, result_op, tmp); break;
  case Bytecodes::_ishr:
  case Bytecodes::_lshr:  __ shift_right(value, count, result_op, tmp); break;
  case Bytecodes::_iushr:
  case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
  default: ShouldNotReachHere();
  }
}

void LIRGenerator::logic_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
  case Bytecodes::_iand:
  case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;

  case Bytecodes::_ior:
  case Bytecodes::_lor:  __ logical_or(left_op, right_op, result_op);  break;

  case Bytecodes::_ixor:
  case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;

  default: ShouldNotReachHere();
  }
}

void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}

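// Note the register shuffle on exit: the incoming 'lock' operand is reused as
// the displaced-header temp ('hdr') for the unlocking sequence, and 'new_hdr'
// takes over the role of the lock operand.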
void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}

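// Allocation fast path: when the klass is loaded, its instance size is known
// at compile time and its layout helper does not demand a slow path (e.g. for
// finalizers), the object is allocated inline via allocate_object and only
// the uncommon case falls into the NewInstanceStub; otherwise allocation is
// delegated entirely to the runtime stub.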
void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  klass2reg_with_patching(klass_reg, klass, info);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}

static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

static Value maxvalue(IfOp* ifop) {
  switch (ifop->cond()) {
    case If::eql: return NULL;
    case If::neq: return NULL;
    case If::lss: // x <  y ? x : y
    case If::leq: // x <= y ? x : y
      if (ifop->x() == ifop->tval() &&
          ifop->y() == ifop->fval()) return ifop->y();
      return NULL;

    case If::gtr: // x >  y ? y : x
    case If::geq: // x >= y ? y : x
      if (ifop->x() == ifop->tval() &&
          ifop->y() == ifop->fval()) return ifop->y();
      return NULL;
  }
  // all conditions are handled above; keep value-returning control paths well-defined
  ShouldNotReachHere();
  return NULL;
}

static ciType* phi_declared_type(Phi* phi) {
  ciType* t = phi->operand_at(0)->declared_type();
  if (t == NULL) {
    return NULL;
  }
  for(int i = 1; i < phi->operand_count(); i++) {
    if (t != phi->operand_at(i)->declared_type()) {
      return NULL;
    }
  }
  return t;
}

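// arraycopy_helper inspects the five System.arraycopy arguments and computes
// which of the LIR_OpArrayCopy runtime checks (null checks, range checks,
// positivity checks, element type check, overlap handling) can be statically
// proven unnecessary, together with the best compile-time guess for the
// array element type.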
void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }

    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != NULL) {
    Value length_limit = NULL;

    IfOp* ifop = length->as_IfOp();
    if (ifop != NULL) {
      // look for expressions like min(v, a.length) which end up as
      // x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }

    // try to skip null checks and range checks
    NewArray* src_array = src->as_NewArray();
    if (src_array != NULL) {
      flags &= ~LIR_OpArrayCopy::src_null_check;
      if (length_limit != NULL &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }

    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != NULL) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != NULL &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    if (src_int->value() >= dst_int->value()) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != NULL) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    // src and dest positions are the same, or dst is zero, so assume
    // a nonoverlapping copy.
    flags &= ~LIR_OpArrayCopy::overlapping;
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}

LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}

LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()],
         err_msg_res("size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type())));
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}

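// Branch profiling without a branch: a cmove selects the byte offset of
// either the taken or the not-taken counter in the MethodData, and the
// selected counter is then incremented with leal so the condition codes set
// up for the branch itself survive the profiling code.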
void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);

    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}

// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated from Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double..) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At the exit of a basic block, we fill the register(s) and spill slots.
// At the entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks the necessary registers and spilling slots.

// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    LIR_Opr operand = cur_val->operand();
    if (cur_val->operand()->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}

// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}

LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically. This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  return LIR_OprFact::virtual_register(vreg, type);
}

// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }

  set_result(x, reg);
  return reg;
}

//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}

void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}

//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}

// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block.  After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}

void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}

void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    signature.append(T_OBJECT); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}

// Example: ref.get()
// Combination of LoadField and g1 pre-write barrier
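// The pre-barrier is applied to the value just loaded from the referent
// field: Reference.get() can hand out an otherwise-unreachable referent, so
// the returned object must be made visible to the concurrent marker.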
void LIRGenerator::do_Reference_get(Intrinsic* x) {

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Address* referent_field_adr =
    new LIR_Address(reference.result(), referent_offset, T_OBJECT);

  LIR_Opr result = rlock_result(x);

  __ load(referent_field_adr, result, info);

  // Register the value in the referent field with the pre-barrier
  pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
              result /* pre_val */,
              false  /* do_load */,
              false  /* patch */,
              NULL   /* info */);
}

// Example: clazz.isInstance(object)
void LIRGenerator::do_isInstance(Intrinsic* x) {
  assert(x->number_of_arguments() == 2, "wrong type");

  // TODO could try to substitute this node with an equivalent InstanceOf
  // if clazz is known to be a constant Class. This will pick up newly found
  // constants after HIR construction. I'll leave this to a future change.

  // as a first cut, make a simple leaf call to runtime to stay platform independent.
  // could follow the aastore example in a future change.

  LIRItem clazz(x->argument_at(0), this);
  LIRItem object(x->argument_at(1), this);
  clazz.load_item();
  object.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform null check on clazz
  if (x->needs_null_check()) {
    CodeEmitInfo* info = state_for(x);
    __ null_check(clazz.result(), info);
  }

  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
                                     x->type(),
                                     NULL); // NULL CodeEmitInfo results in a leaf call
  __ move(call_result, result);
}

// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), result, info);
  __ move_wide(new LIR_Address(result, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
}

// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}

void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}

//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() != NULL || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}

Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}

void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}

// Block local constant handling.  This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers.  Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}

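// Linear search of the block-local constant table. Constants are compared by
// bit pattern, so for example +0.0f and -0.0f are correctly kept in separate
// registers even though they compare equal as floats.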
LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}

// Various barriers

void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                               bool do_load, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      // No pre barriers
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No pre barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_post_barrier(addr, new_val);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr, new_val);
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No post barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS
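// Snapshot-at-the-beginning (SATB) pre-barrier: while concurrent marking is
// active, the value about to be overwritten must be logged so that the
// marker still sees the object graph as it existed when marking started.
// The per-thread active flag is tested first, so the common (not-marking)
// case is a single compare-and-branch past the G1PreBarrierStub.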
void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                                                     bool do_load, bool patch, CodeEmitInfo* info) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    flag_type = T_BYTE;
  }
  LIR_Opr thrd = getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(JavaThread::satb_mark_queue_offset() +
                             PtrQueue::byte_offset_of_active()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == NULL, "sanity");

    slow = new G1PreBarrierStub(pre_val);
  }

  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}

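// G1 post-barrier. The cross-region test (addr XOR new_val) >>
// LogOfHRGrainBytes is zero exactly when the store target and the stored
// object live in the same heap region, in which case no remembered-set
// update is needed; constant NULL stores are filtered out up front.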
void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  // If the "new_val" is a constant NULL, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == NULL) return;

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr xor_res = new_pointer_register();
  LIR_Opr xor_shift_res = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, xor_res);
    __ logical_xor(xor_res, new_val, xor_res);
    __ move(xor_res, xor_shift_res);
    __ unsigned_shift_right(xor_shift_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  } else {
    __ logical_xor(addr, new_val, xor_res);
    __ unsigned_shift_right(xor_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  }

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    __ leal(new_val, new_val_reg);
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));

  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
  __ branch_destination(slow->continuation());
}

#endif // INCLUDE_ALL_GCS
////////////////////////////////////////////////////////////////////////
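// Generational card-marking post-barrier: dirty the card covering the store
// target by writing a zero byte at byte_map_base + (addr >> card_shift).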
void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {

  assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    // ptr cannot be an object because we use this barrier for array card marks
    // and addr can point in the middle of an array.
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

#ifdef ARM
  // TODO: ARM - move to platform-dependent code
  LIR_Opr tmp = FrameMap::R14_opr;
  if (VM_Version::supports_movw()) {
    __ move((LIR_Opr)card_table_base, tmp);
  } else {
    __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
  }

  CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
  LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
  if (((int)ct->byte_map_base & 0xff) == 0) {
    __ move(tmp, card_addr);
  } else {
    LIR_Opr tmp_zero = new_register(T_INT);
    __ move(LIR_OprFact::intConst(0), tmp_zero);
    __ move(tmp_zero, card_addr);
  }
#else // ARM
  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  }
  if (can_inline_as_constant(card_table_base)) {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
  } else {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, load_constant(card_table_base),
                            T_BYTE));
  }
#endif // ARM
}

1643 //------------------------field access--------------------------------------
1645 // Comment copied form templateTable_i486.cpp
1646 // ----------------------------------------------------------------------------
1647 // Volatile variables demand their effects be made known to all CPU's in
1648 // order. Store buffers on most chips allow reads & writes to reorder; the
1649 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1650 // memory barrier (i.e., it's not sufficient that the interpreter does not
1651 // reorder volatile references, the hardware also must not reorder them).
1652 //
1653 // According to the new Java Memory Model (JMM):
1654 // (1) All volatiles are serialized with respect to each other.
1655 // ALSO reads & writes act as acquire & release, so:
1656 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1657 // the read float up to before the read. It's OK for non-volatile memory refs
1658 // that happen before the volatile read to float down below it.
1659 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1660 // that happen BEFORE the write float down to after the write. It's OK for
1661 // non-volatile memory refs that happen after the volatile write to float up
1662 // before it.
1663 //
1664 // We only put in barriers around volatile refs (they are expensive), not
1665 // _between_ memory refs (that would require us to track the flavor of the
1666 // previous memory refs). Requirements (2) and (3) require some barriers
1667 // before volatile stores and after volatile loads. These nearly cover
1668 // requirement (1) but miss the volatile-store-volatile-load case. This final
1669 // case is placed after volatile-stores although it could just as well go
1670 // before volatile-loads.
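// In terms of the LIR emitted below, a volatile store therefore becomes
// roughly:
//   membar_release; store; membar
// and a volatile load:
//   load; membar_acquire
// (both only on MP systems).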
1673 void LIRGenerator::do_StoreField(StoreField* x) {
1674 bool needs_patching = x->needs_patching();
1675 bool is_volatile = x->field()->is_volatile();
1676 BasicType field_type = x->field_type();
1677 bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
1679 CodeEmitInfo* info = NULL;
1680 if (needs_patching) {
1681 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1682 info = state_for(x, x->state_before());
1683 } else if (x->needs_null_check()) {
1684 NullCheck* nc = x->explicit_null_check();
1685 if (nc == NULL) {
1686 info = state_for(x);
1687 } else {
1688 info = state_for(nc);
1689 }
1690 }
1693 LIRItem object(x->obj(), this);
1694 LIRItem value(x->value(), this);
1696 object.load_item();
1698 if (is_volatile || needs_patching) {
1699 // load item if field is volatile (fewer special cases for volatiles)
1700 // load item if field not initialized
1701 // load item if field not constant
1702 // because of code patching we cannot inline constants
1703 if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1704 value.load_byte_item();
1705 } else {
1706 value.load_item();
1707 }
1708 } else {
1709 value.load_for_store(field_type);
1710 }
1712 set_no_result(x);
1714 #ifndef PRODUCT
1715 if (PrintNotLoaded && needs_patching) {
1716 tty->print_cr(" ###class not loaded at store_%s bci %d",
1717 x->is_static() ? "static" : "field", x->printable_bci());
1718 }
1719 #endif
1721 if (x->needs_null_check() &&
1722 (needs_patching ||
1723 MacroAssembler::needs_explicit_null_check(x->offset()))) {
1724 // emit an explicit null check because the offset is too large
1725 __ null_check(object.result(), new CodeEmitInfo(info));
1726 }
1728 LIR_Address* address;
1729 if (needs_patching) {
1730 // we need to patch the offset in the instruction so don't allow
1731 // generate_address to try to be smart about emitting the -1.
1732 // Otherwise the patching code won't know how to find the
1733 // instruction to patch.
1734 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1735 } else {
1736 address = generate_address(object.result(), x->offset(), field_type);
1737 }
1739 if (is_volatile && os::is_MP()) {
1740 __ membar_release();
1741 }
1743 if (is_oop) {
1744 // Do the pre-write barrier, if any.
1745 pre_barrier(LIR_OprFact::address(address),
1746 LIR_OprFact::illegalOpr /* pre_val */,
1747 true /* do_load*/,
1748 needs_patching,
1749 (info ? new CodeEmitInfo(info) : NULL));
1750 }
1752 if (is_volatile && !needs_patching) {
1753 volatile_field_store(value.result(), address, info);
1754 } else {
1755 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1756 __ store(value.result(), address, info, patch_code);
1757 }
1759 if (is_oop) {
1760 // Store to object so mark the card of the header
1761 post_barrier(object.result(), value.result());
1762 }
1764 if (is_volatile && os::is_MP()) {
1765 __ membar();
1766 }
1767 }
1770 void LIRGenerator::do_LoadField(LoadField* x) {
1771 bool needs_patching = x->needs_patching();
1772 bool is_volatile = x->field()->is_volatile();
1773 BasicType field_type = x->field_type();
1775 CodeEmitInfo* info = NULL;
1776 if (needs_patching) {
1777 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1778 info = state_for(x, x->state_before());
1779 } else if (x->needs_null_check()) {
1780 NullCheck* nc = x->explicit_null_check();
1781 if (nc == NULL) {
1782 info = state_for(x);
1783 } else {
1784 info = state_for(nc);
1785 }
1786 }
1788 LIRItem object(x->obj(), this);
1790 object.load_item();
1792 #ifndef PRODUCT
1793 if (PrintNotLoaded && needs_patching) {
1794 tty->print_cr(" ###class not loaded at load_%s bci %d",
1795 x->is_static() ? "static" : "field", x->printable_bci());
1796 }
1797 #endif
1799 bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1800 if (x->needs_null_check() &&
1801 (needs_patching ||
1802 MacroAssembler::needs_explicit_null_check(x->offset()) ||
1803 stress_deopt)) {
1804 LIR_Opr obj = object.result();
1805 if (stress_deopt) {
1806 obj = new_register(T_OBJECT);
1807 __ move(LIR_OprFact::oopConst(NULL), obj);
1808 }
1809 // emit an explicit null check because the offset is too large
1810 __ null_check(obj, new CodeEmitInfo(info));
1811 }
1813 LIR_Opr reg = rlock_result(x, field_type);
1814 LIR_Address* address;
1815 if (needs_patching) {
1816 // we need to patch the offset in the instruction so don't allow
1817 // generate_address to try to be smart about emitting the -1.
1818 // Otherwise the patching code won't know how to find the
1819 // instruction to patch.
1820 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1821 } else {
1822 address = generate_address(object.result(), x->offset(), field_type);
1823 }
1825 if (is_volatile && !needs_patching) {
1826 volatile_field_load(address, reg, info);
1827 } else {
1828 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1829 __ load(address, reg, info, patch_code);
1830 }
1832 if (is_volatile && os::is_MP()) {
1833 __ membar_acquire();
1834 }
1835 }
1838 //------------------------java.nio.Buffer.checkIndex------------------------
1840 // int java.nio.Buffer.checkIndex(int)
1841 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1842 // NOTE: by the time we are in checkIndex() we are guaranteed that
1843 // the buffer is non-null (because checkIndex is package-private and
1844 // only called from within other methods in the buffer).
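// The emitted check is effectively:
//   if ((unsigned) index >= (unsigned) buf.limit) goto RangeCheckStub;
//   result = index;
// The unsigned compare (below/aboveEqual) also rejects negative indices.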
1845 assert(x->number_of_arguments() == 2, "wrong type");
1846 LIRItem buf (x->argument_at(0), this);
1847 LIRItem index(x->argument_at(1), this);
1848 buf.load_item();
1849 index.load_item();
1851 LIR_Opr result = rlock_result(x);
1852 if (GenerateRangeChecks) {
1853 CodeEmitInfo* info = state_for(x);
1854 CodeStub* stub = new RangeCheckStub(info, index.result(), true);
1855 if (index.result()->is_constant()) {
1856 cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1857 __ branch(lir_cond_belowEqual, T_INT, stub);
1858 } else {
1859 cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
1860 java_nio_Buffer::limit_offset(), T_INT, info);
1861 __ branch(lir_cond_aboveEqual, T_INT, stub);
1862 }
1863 __ move(index.result(), result);
1864 } else {
1865 // Just load the index into the result register
1866 __ move(index.result(), result);
1867 }
1868 }
1871 //------------------------array access--------------------------------------
1874 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
1875 if (x->use_count() == 0 && !x->can_trap()) return;
1877 LIRItem array(x->array(), this);
1878 array.load_item();
1879 LIR_Opr reg = rlock_result(x);
1881 CodeEmitInfo* info = NULL;
1882 if (x->needs_null_check()) {
1883 NullCheck* nc = x->explicit_null_check();
1884 if (nc == NULL) {
1885 info = state_for(x);
1886 } else {
1887 info = state_for(nc);
1888 }
1889 if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
1890 LIR_Opr obj = new_register(T_OBJECT);
1891 __ move(LIR_OprFact::oopConst(NULL), obj);
1892 __ null_check(obj, new CodeEmitInfo(info));
1893 }
1894 }
1895 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
1896 }
1899 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
1900 bool use_length = x->length() != NULL;
1901 LIRItem array(x->array(), this);
1902 LIRItem index(x->index(), this);
1903 LIRItem length(this);
1904 bool needs_range_check = x->compute_needs_range_check();
1906 if (use_length && needs_range_check) {
1907 length.set_instruction(x->length());
1908 length.load_item();
1909 }
1911 array.load_item();
1912 if (index.is_constant() && can_inline_as_constant(x->index())) {
1913 // let it be a constant
1914 index.dont_load_item();
1915 } else {
1916 index.load_item();
1917 }
1919 CodeEmitInfo* range_check_info = state_for(x);
1920 CodeEmitInfo* null_check_info = NULL;
1921 if (x->needs_null_check()) {
1922 NullCheck* nc = x->explicit_null_check();
1923 if (nc != NULL) {
1924 null_check_info = state_for(nc);
1925 } else {
1926 null_check_info = range_check_info;
1927 }
1928 if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
1929 LIR_Opr obj = new_register(T_OBJECT);
1930 __ move(LIR_OprFact::oopConst(NULL), obj);
1931 __ null_check(obj, new CodeEmitInfo(null_check_info));
1932 }
1933 }
1935 // emit array address setup early so it schedules better
1936 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
1938 if (GenerateRangeChecks && needs_range_check) {
1939 if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1940 __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
1941 } else if (use_length) {
1942 // TODO: use a (modified) version of array_range_check that does not require a
1943 // constant length to be loaded to a register
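// Branch to the range-check stub when length <= index (unsigned),
// i.e. when index is out of bounds.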
1944 __ cmp(lir_cond_belowEqual, length.result(), index.result());
1945 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1946 } else {
1947 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1948 // The range check performs the null check, so clear it out for the load
1949 null_check_info = NULL;
1950 }
1951 }
1953 __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
1954 }
1957 void LIRGenerator::do_NullCheck(NullCheck* x) {
1958 if (x->can_trap()) {
1959 LIRItem value(x->obj(), this);
1960 value.load_item();
1961 CodeEmitInfo* info = state_for(x);
1962 __ null_check(value.result(), info);
1963 }
1964 }
1967 void LIRGenerator::do_TypeCast(TypeCast* x) {
1968 LIRItem value(x->obj(), this);
1969 value.load_item();
1970 // the result is the same as from the node we are casting
1971 set_result(x, value.result());
1972 }
1975 void LIRGenerator::do_Throw(Throw* x) {
1976 LIRItem exception(x->exception(), this);
1977 exception.load_item();
1978 set_no_result(x);
1979 LIR_Opr exception_opr = exception.result();
1980 CodeEmitInfo* info = state_for(x, x->state());
1982 #ifndef PRODUCT
1983 if (PrintC1Statistics) {
1984 increment_counter(Runtime1::throw_count_address(), T_INT);
1985 }
1986 #endif
1988 // check if the instruction has an xhandler in any of the nested scopes
1989 bool unwind = false;
1990 if (info->exception_handlers()->length() == 0) {
1991 // this throw is not inside an xhandler
1992 unwind = true;
1993 } else {
1994 // get some idea of the throw type
1995 bool type_is_exact = true;
1996 ciType* throw_type = x->exception()->exact_type();
1997 if (throw_type == NULL) {
1998 type_is_exact = false;
1999 throw_type = x->exception()->declared_type();
2000 }
2001 if (throw_type != NULL && throw_type->is_instance_klass()) {
2002 ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
2003 unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
2004 }
2005 }
2007 // do null check before moving exception oop into fixed register
2008 // to avoid a fixed interval with an oop during the null check.
2009 // Use a copy of the CodeEmitInfo because debug information is
2010 // different for null_check and throw.
2011 if (GenerateCompilerNullChecks &&
2012 (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
2013 // if the exception object wasn't created using new then it might be null.
2014 __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
2015 }
2017 if (compilation()->env()->jvmti_can_post_on_exceptions()) {
2018 // we need to go through the exception lookup path to get JVMTI
2019 // notification done
2020 unwind = false;
2021 }
2023 // move exception oop into fixed register
2024 __ move(exception_opr, exceptionOopOpr());
2026 if (unwind) {
2027 __ unwind_exception(exceptionOopOpr());
2028 } else {
2029 __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
2030 }
2031 }
2034 void LIRGenerator::do_RoundFP(RoundFP* x) {
2035 LIRItem input(x->input(), this);
2036 input.load_item();
2037 LIR_Opr input_opr = input.result();
2038 assert(input_opr->is_register(), "why round if value is not in a register?");
2039 assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
2040 if (input_opr->is_single_fpu()) {
2041 set_result(x, round_item(input_opr)); // This code path not currently taken
2042 } else {
2043 LIR_Opr result = new_register(T_DOUBLE);
2044 set_vreg_flag(result, must_start_in_memory);
2045 __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
2046 set_result(x, result);
2047 }
2048 }
2050 void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
2051 LIRItem base(x->base(), this);
2052 LIRItem idx(this);
2054 base.load_item();
2055 if (x->has_index()) {
2056 idx.set_instruction(x->index());
2057 idx.load_nonconstant();
2058 }
2060 LIR_Opr reg = rlock_result(x, x->basic_type());
2062 int log2_scale = 0;
2063 if (x->has_index()) {
2064 assert(x->index()->type()->tag() == intTag, "should not find non-int index");
2065 log2_scale = x->log2_scale();
2066 }
2068 assert(!x->has_index() || idx.value() == x->index(), "should match");
2070 LIR_Opr base_op = base.result();
2071 #ifndef _LP64
2072 if (x->base()->type()->tag() == longTag) {
2073 base_op = new_register(T_INT);
2074 __ convert(Bytecodes::_l2i, base.result(), base_op);
2075 } else {
2076 assert(x->base()->type()->tag() == intTag, "must be");
2077 }
2078 #endif
2080 BasicType dst_type = x->basic_type();
2081 LIR_Opr index_op = idx.result();
2083 LIR_Address* addr;
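// The effective address computed below is base + (index << log2_scale);
// platforms whose addressing modes cannot encode the scale shift the
// index into a temporary register first.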
2084 if (index_op->is_constant()) {
2085 assert(log2_scale == 0, "must not have a scale");
2086 addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2087 } else {
2088 #ifdef X86
2089 #ifdef _LP64
2090 if (!index_op->is_illegal() && index_op->type() == T_INT) {
2091 LIR_Opr tmp = new_pointer_register();
2092 __ convert(Bytecodes::_i2l, index_op, tmp);
2093 index_op = tmp;
2094 }
2095 #endif
2096 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2097 #elif defined(ARM)
2098 addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2099 #else
2100 if (index_op->is_illegal() || log2_scale == 0) {
2101 #ifdef _LP64
2102 if (!index_op->is_illegal() && index_op->type() == T_INT) {
2103 LIR_Opr tmp = new_pointer_register();
2104 __ convert(Bytecodes::_i2l, index_op, tmp);
2105 index_op = tmp;
2106 }
2107 #endif
2108 addr = new LIR_Address(base_op, index_op, dst_type);
2109 } else {
2110 LIR_Opr tmp = new_pointer_register();
2111 __ shift_left(index_op, log2_scale, tmp);
2112 addr = new LIR_Address(base_op, tmp, dst_type);
2113 }
2114 #endif
2115 }
2117 if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2118 __ unaligned_move(addr, reg);
2119 } else {
2120 if (dst_type == T_OBJECT && x->is_wide()) {
2121 __ move_wide(addr, reg);
2122 } else {
2123 __ move(addr, reg);
2124 }
2125 }
2126 }
2129 void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
2130 int log2_scale = 0;
2131 BasicType type = x->basic_type();
2133 if (x->has_index()) {
2134 assert(x->index()->type()->tag() == intTag, "should not find non-int index");
2135 log2_scale = x->log2_scale();
2136 }
2138 LIRItem base(x->base(), this);
2139 LIRItem value(x->value(), this);
2140 LIRItem idx(this);
2142 base.load_item();
2143 if (x->has_index()) {
2144 idx.set_instruction(x->index());
2145 idx.load_item();
2146 }
2148 if (type == T_BYTE || type == T_BOOLEAN) {
2149 value.load_byte_item();
2150 } else {
2151 value.load_item();
2152 }
2154 set_no_result(x);
2156 LIR_Opr base_op = base.result();
2157 #ifndef _LP64
2158 if (x->base()->type()->tag() == longTag) {
2159 base_op = new_register(T_INT);
2160 __ convert(Bytecodes::_l2i, base.result(), base_op);
2161 } else {
2162 assert(x->base()->type()->tag() == intTag, "must be");
2163 }
2164 #endif
2166 LIR_Opr index_op = idx.result();
2167 if (log2_scale != 0) {
2168 // temporary fix (platform dependent code without shift on Intel would be better)
2169 index_op = new_pointer_register();
2170 #ifdef _LP64
2171 if (idx.result()->type() == T_INT) {
2172 __ convert(Bytecodes::_i2l, idx.result(), index_op);
2173 } else {
2174 #endif
2175 // TODO: ARM also allows embedded shift in the address
2176 __ move(idx.result(), index_op);
2177 #ifdef _LP64
2178 }
2179 #endif
2180 __ shift_left(index_op, log2_scale, index_op);
2181 }
2182 #ifdef _LP64
2183 else if(!index_op->is_illegal() && index_op->type() == T_INT) {
2184 LIR_Opr tmp = new_pointer_register();
2185 __ convert(Bytecodes::_i2l, index_op, tmp);
2186 index_op = tmp;
2187 }
2188 #endif
2190 LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2191 __ move(value.result(), addr);
2192 }
2195 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2196 BasicType type = x->basic_type();
2197 LIRItem src(x->object(), this);
2198 LIRItem off(x->offset(), this);
2200 off.load_item();
2201 src.load_item();
2203 LIR_Opr value = rlock_result(x, x->basic_type());
2205 get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2207 #if INCLUDE_ALL_GCS
2208 // We might be reading the value of the referent field of a
2209 // Reference object in order to attach it back to the live
2210 // object graph. If G1 is enabled then we need to record
2211 // the value that is being returned in an SATB log buffer.
2212 //
2213 // We need to generate code similar to the following...
2214 //
2215 // if (offset == java_lang_ref_Reference::referent_offset) {
2216 // if (src != NULL) {
2217 // if (klass(src)->reference_type() != REF_NONE) {
2218 // pre_barrier(..., value, ...);
2219 // }
2220 // }
2221 // }
2223 if (UseG1GC && type == T_OBJECT) {
2224 bool gen_pre_barrier = true; // Assume we need to generate pre_barrier.
2225 bool gen_offset_check = true; // Assume we need to generate the offset guard.
2226 bool gen_source_check = true; // Assume we need to check the src object for null.
2227 bool gen_type_check = true; // Assume we need to check the reference_type.
2229 if (off.is_constant()) {
2230 jlong off_con = (off.type()->is_int() ?
2231 (jlong) off.get_jint_constant() :
2232 off.get_jlong_constant());
2235 if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2236 // The constant offset is something other than referent_offset.
2237 // We can skip generating/checking the remaining guards and
2238 // skip generation of the code stub.
2239 gen_pre_barrier = false;
2240 } else {
2241 // The constant offset is the same as referent_offset -
2242 // we do not need to generate a runtime offset check.
2243 gen_offset_check = false;
2244 }
2245 }
2247 // We don't need to generate stub if the source object is an array
2248 if (gen_pre_barrier && src.type()->is_array()) {
2249 gen_pre_barrier = false;
2250 }
2252 if (gen_pre_barrier) {
2253 // We still need to continue with the checks.
2254 if (src.is_constant()) {
2255 ciObject* src_con = src.get_jobject_constant();
2257 if (src_con->is_null_object()) {
2258 // The constant src object is null - We can skip
2259 // generating the code stub.
2260 gen_pre_barrier = false;
2261 } else {
2262 // Non-null constant source object. We still have to generate
2263 // the slow stub - but we don't need to generate the runtime
2264 // null object check.
2265 gen_source_check = false;
2266 }
2267 }
2268 }
2269 if (gen_pre_barrier && !PatchALot) {
2270 // Can the klass of object be statically determined to be
2271 // a sub-class of Reference?
2272 ciType* type = src.value()->declared_type();
2273 if ((type != NULL) && type->is_loaded()) {
2274 if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
2275 gen_type_check = false;
2276 } else if (type->is_klass() &&
2277 !compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
2278 // Not Reference and not Object klass.
2279 gen_pre_barrier = false;
2280 }
2281 }
2282 }
2284 if (gen_pre_barrier) {
2285 LabelObj* Lcont = new LabelObj();
2287 // We may need to generate more than one runtime check here. Let's start
2288 // with the offset check.
2289 if (gen_offset_check) {
2290 // if (offset != referent_offset) -> continue
2291 // If offset is an int then we can do the comparison with the
2292 // referent_offset constant; otherwise we need to move
2293 // referent_offset into a temporary register and generate
2294 // a reg-reg compare.
2296 LIR_Opr referent_off;
2298 if (off.type()->is_int()) {
2299 referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
2300 } else {
2301 assert(off.type()->is_long(), "what else?");
2302 referent_off = new_register(T_LONG);
2303 __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
2304 }
2305 __ cmp(lir_cond_notEqual, off.result(), referent_off);
2306 __ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
2307 }
2308 if (gen_source_check) {
2309 // offset is a const and equals referent offset
2310 // if (source == null) -> continue
2311 __ cmp(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL));
2312 __ branch(lir_cond_equal, T_OBJECT, Lcont->label());
2313 }
2314 LIR_Opr src_klass = new_register(T_OBJECT);
2315 if (gen_type_check) {
2316 // We have determined that offset == referent_offset && src != null.
2317 // if (src->_klass->_reference_type == REF_NONE) -> continue
2318 __ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), UseCompressedKlassPointers ? T_OBJECT : T_ADDRESS), src_klass);
2319 LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
2320 LIR_Opr reference_type = new_register(T_INT);
2321 __ move(reference_type_addr, reference_type);
2322 __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
2323 __ branch(lir_cond_equal, T_INT, Lcont->label());
2324 }
2325 {
2326 // We have determined that src->_klass->_reference_type != REF_NONE
2327 // so register the value in the referent field with the pre-barrier.
2328 pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
2329 value /* pre_val */,
2330 false /* do_load */,
2331 false /* patch */,
2332 NULL /* info */);
2333 }
2334 __ branch_destination(Lcont->label());
2335 }
2336 }
2337 #endif // INCLUDE_ALL_GCS
2339 if (x->is_volatile() && os::is_MP()) __ membar_acquire();
2340 }
2343 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2344 BasicType type = x->basic_type();
2345 LIRItem src(x->object(), this);
2346 LIRItem off(x->offset(), this);
2347 LIRItem data(x->value(), this);
2349 src.load_item();
2350 if (type == T_BOOLEAN || type == T_BYTE) {
2351 data.load_byte_item();
2352 } else {
2353 data.load_item();
2354 }
2355 off.load_item();
2357 set_no_result(x);
2359 if (x->is_volatile() && os::is_MP()) __ membar_release();
2360 put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
2361 if (x->is_volatile() && os::is_MP()) __ membar();
2362 }
2365 void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
2366 LIRItem src(x->object(), this);
2367 LIRItem off(x->offset(), this);
2369 src.load_item();
2370 if (off.is_constant() && can_inline_as_constant(x->offset())) {
2371 // let it be a constant
2372 off.dont_load_item();
2373 } else {
2374 off.load_item();
2375 }
2377 set_no_result(x);
2379 LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
2380 __ prefetch(addr, is_store);
2381 }
2384 void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
2385 do_UnsafePrefetch(x, false);
2386 }
2389 void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
2390 do_UnsafePrefetch(x, true);
2391 }
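// Emits the compare-and-branch sequence for a list of switch ranges.
// For a range [low_key, high_key] -> dest the emitted code is roughly:
//   if (value < low_key)   goto next_range;
//   if (value <= high_key) goto dest;
// Ranges of one or two keys are emitted as direct equality tests.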
2394 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2395 int lng = x->length();
2397 for (int i = 0; i < lng; i++) {
2398 SwitchRange* one_range = x->at(i);
2399 int low_key = one_range->low_key();
2400 int high_key = one_range->high_key();
2401 BlockBegin* dest = one_range->sux();
2402 if (low_key == high_key) {
2403 __ cmp(lir_cond_equal, value, low_key);
2404 __ branch(lir_cond_equal, T_INT, dest);
2405 } else if (high_key - low_key == 1) {
2406 __ cmp(lir_cond_equal, value, low_key);
2407 __ branch(lir_cond_equal, T_INT, dest);
2408 __ cmp(lir_cond_equal, value, high_key);
2409 __ branch(lir_cond_equal, T_INT, dest);
2410 } else {
2411 LabelObj* L = new LabelObj();
2412 __ cmp(lir_cond_less, value, low_key);
2413 __ branch(lir_cond_less, T_INT, L->label());
2414 __ cmp(lir_cond_lessEqual, value, high_key);
2415 __ branch(lir_cond_lessEqual, T_INT, dest);
2416 __ branch_destination(L->label());
2417 }
2418 }
2419 __ jump(default_sux);
2420 }
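// Collapses runs of consecutive table-switch keys that share a successor
// into SwitchRanges. For example, successors {B1, B1, B1, B2} for keys
// 0..3 become the ranges [0,2]->B1 and [3,3]->B2; ranges that explicitly
// dispatch to the default successor are skipped.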
2423 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2424 SwitchRangeList* res = new SwitchRangeList();
2425 int len = x->length();
2426 if (len > 0) {
2427 BlockBegin* sux = x->sux_at(0);
2428 int key = x->lo_key();
2429 BlockBegin* default_sux = x->default_sux();
2430 SwitchRange* range = new SwitchRange(key, sux);
2431 for (int i = 0; i < len; i++, key++) {
2432 BlockBegin* new_sux = x->sux_at(i);
2433 if (sux == new_sux) {
2434 // still in same range
2435 range->set_high_key(key);
2436 } else {
2437 // skip tests which explicitly dispatch to the default
2438 if (sux != default_sux) {
2439 res->append(range);
2440 }
2441 range = new SwitchRange(key, new_sux);
2442 }
2443 sux = new_sux;
2444 }
2445 if (res->length() == 0 || res->last() != range) res->append(range);
2446 }
2447 return res;
2448 }
2451 // we expect the keys to be sorted by increasing value
2452 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2453 SwitchRangeList* res = new SwitchRangeList();
2454 int len = x->length();
2455 if (len > 0) {
2456 BlockBegin* default_sux = x->default_sux();
2457 int key = x->key_at(0);
2458 BlockBegin* sux = x->sux_at(0);
2459 SwitchRange* range = new SwitchRange(key, sux);
2460 for (int i = 1; i < len; i++) {
2461 int new_key = x->key_at(i);
2462 BlockBegin* new_sux = x->sux_at(i);
2463 if (key+1 == new_key && sux == new_sux) {
2464 // still in same range
2465 range->set_high_key(new_key);
2466 } else {
2467 // skip tests which explicitly dispatch to the default
2468 if (range->sux() != default_sux) {
2469 res->append(range);
2470 }
2471 range = new SwitchRange(new_key, new_sux);
2472 }
2473 key = new_key;
2474 sux = new_sux;
2475 }
2476 if (res->length() == 0 || res->last() != range) res->append(range);
2477 }
2478 return res;
2479 }
2482 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2483 LIRItem tag(x->tag(), this);
2484 tag.load_item();
2485 set_no_result(x);
2487 if (x->is_safepoint()) {
2488 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2489 }
2491 // move values into phi locations
2492 move_to_phi(x->state());
2494 int lo_key = x->lo_key();
2495 int hi_key = x->hi_key();
2496 int len = x->length();
2497 LIR_Opr value = tag.result();
2498 if (UseTableRanges) {
2499 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2500 } else {
2501 for (int i = 0; i < len; i++) {
2502 __ cmp(lir_cond_equal, value, i + lo_key);
2503 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2504 }
2505 __ jump(x->default_sux());
2506 }
2507 }
2510 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2511 LIRItem tag(x->tag(), this);
2512 tag.load_item();
2513 set_no_result(x);
2515 if (x->is_safepoint()) {
2516 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2517 }
2519 // move values into phi locations
2520 move_to_phi(x->state());
2522 LIR_Opr value = tag.result();
2523 if (UseTableRanges) {
2524 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2525 } else {
2526 int len = x->length();
2527 for (int i = 0; i < len; i++) {
2528 __ cmp(lir_cond_equal, value, x->key_at(i));
2529 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2530 }
2531 __ jump(x->default_sux());
2532 }
2533 }
2536 void LIRGenerator::do_Goto(Goto* x) {
2537 set_no_result(x);
2539 if (block()->next()->as_OsrEntry()) {
2540 // need to free up storage used for OSR entry point
2541 LIR_Opr osrBuffer = block()->next()->operand();
2542 BasicTypeList signature;
2543 signature.append(T_INT);
2544 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2545 __ move(osrBuffer, cc->args()->at(0));
2546 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2547 getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2548 }
2550 if (x->is_safepoint()) {
2551 ValueStack* state = x->state_before() ? x->state_before() : x->state();
2553 // increment backedge counter if needed
2554 CodeEmitInfo* info = state_for(x, state);
2555 increment_backedge_counter(info, x->profiled_bci());
2556 CodeEmitInfo* safepoint_info = state_for(x, state);
2557 __ safepoint(safepoint_poll_register(), safepoint_info);
2558 }
2560 // Gotos can be folded Ifs; handle this case.
2561 if (x->should_profile()) {
2562 ciMethod* method = x->profiled_method();
2563 assert(method != NULL, "method should be set if branch is profiled");
2564 ciMethodData* md = method->method_data_or_null();
2565 assert(md != NULL, "Sanity");
2566 ciProfileData* data = md->bci_to_data(x->profiled_bci());
2567 assert(data != NULL, "must have profiling data");
2568 int offset;
2569 if (x->direction() == Goto::taken) {
2570 assert(data->is_BranchData(), "need BranchData for two-way branches");
2571 offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2572 } else if (x->direction() == Goto::not_taken) {
2573 assert(data->is_BranchData(), "need BranchData for two-way branches");
2574 offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2575 } else {
2576 assert(data->is_JumpData(), "need JumpData for branches");
2577 offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2578 }
2579 LIR_Opr md_reg = new_register(T_METADATA);
2580 __ metadata2reg(md->constant_encoding(), md_reg);
2582 increment_counter(new LIR_Address(md_reg, offset,
2583 NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2584 }
2586 // emit phi-instruction moves after the safepoint since this simplifies
2587 // describing the state at the safepoint.
2588 move_to_phi(x->state());
2590 __ jump(x->default_sux());
2591 }
2594 void LIRGenerator::do_Base(Base* x) {
2595 __ std_entry(LIR_OprFact::illegalOpr);
2596 // Emit moves from physical registers / stack slots to virtual registers
2597 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2598 IRScope* irScope = compilation()->hir()->top_scope();
2599 int java_index = 0;
2600 for (int i = 0; i < args->length(); i++) {
2601 LIR_Opr src = args->at(i);
2602 assert(!src->is_illegal(), "check");
2603 BasicType t = src->type();
2605 // Types which are smaller than int are passed as int, so
2606 // correct the type that is passed.
2607 switch (t) {
2608 case T_BYTE:
2609 case T_BOOLEAN:
2610 case T_SHORT:
2611 case T_CHAR:
2612 t = T_INT;
2613 break;
2614 }
2616 LIR_Opr dest = new_register(t);
2617 __ move(src, dest);
2619 // Assign new location to Local instruction for this local
2620 Local* local = x->state()->local_at(java_index)->as_Local();
2621 assert(local != NULL, "Locals for incoming arguments must have been created");
2622 #ifndef __SOFTFP__
2623 // The java calling convention passes double as long and float as int.
2624 assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2625 #endif // __SOFTFP__
2626 local->set_operand(dest);
2627 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
2628 java_index += type2size[t];
2629 }
2631 if (compilation()->env()->dtrace_method_probes()) {
2632 BasicTypeList signature;
2633 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
2634 signature.append(T_OBJECT); // Method*
2635 LIR_OprList* args = new LIR_OprList();
2636 args->append(getThreadPointer());
2637 LIR_Opr meth = new_register(T_METADATA);
2638 __ metadata2reg(method()->constant_encoding(), meth);
2639 args->append(meth);
2640 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2641 }
2643 if (method()->is_synchronized()) {
2644 LIR_Opr obj;
2645 if (method()->is_static()) {
2646 obj = new_register(T_OBJECT);
2647 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2648 } else {
2649 Local* receiver = x->state()->local_at(0)->as_Local();
2650 assert(receiver != NULL, "must already exist");
2651 obj = receiver->operand();
2652 }
2653 assert(obj->is_valid(), "must be valid");
2655 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2656 LIR_Opr lock = new_register(T_INT);
2657 __ load_stack_address_monitor(0, lock);
2659 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
2660 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2662 // receiver is guaranteed non-NULL so don't need CodeEmitInfo
2663 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2664 }
2665 }
2667 // increment invocation counters if needed
2668 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2669 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
2670 increment_invocation_counter(info);
2671 }
2673 // all blocks with a successor must end with an unconditional jump
2674 // to the successor even if they are consecutive
2675 __ jump(x->default_sux());
2676 }
2679 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2680 // construct our frame and model the production of incoming pointer
2681 // to the OSR buffer.
2682 __ osr_entry(LIR_Assembler::osrBufferPointer());
2683 LIR_Opr result = rlock_result(x);
2684 __ move(LIR_Assembler::osrBufferPointer(), result);
2685 }
2688 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2689 assert(args->length() == arg_list->length(),
2690 err_msg_res("args=%d, arg_list=%d", args->length(), arg_list->length()));
2691 for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2692 LIRItem* param = args->at(i);
2693 LIR_Opr loc = arg_list->at(i);
2694 if (loc->is_register()) {
2695 param->load_item_force(loc);
2696 } else {
2697 LIR_Address* addr = loc->as_address_ptr();
2698 param->load_for_store(addr->type());
2699 if (addr->type() == T_OBJECT) {
2700 __ move_wide(param->result(), addr);
2701 } else
2702 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2703 __ unaligned_move(param->result(), addr);
2704 } else {
2705 __ move(param->result(), addr);
2706 }
2707 }
2708 }
2710 if (x->has_receiver()) {
2711 LIRItem* receiver = args->at(0);
2712 LIR_Opr loc = arg_list->at(0);
2713 if (loc->is_register()) {
2714 receiver->load_item_force(loc);
2715 } else {
2716 assert(loc->is_address(), "just checking");
2717 receiver->load_for_store(T_OBJECT);
2718 __ move_wide(receiver->result(), loc->as_address_ptr());
2719 }
2720 }
2721 }
2724 // Visits all arguments, returns appropriate items without loading them
2725 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2726 LIRItemList* argument_items = new LIRItemList();
2727 if (x->has_receiver()) {
2728 LIRItem* receiver = new LIRItem(x->receiver(), this);
2729 argument_items->append(receiver);
2730 }
2731 for (int i = 0; i < x->number_of_arguments(); i++) {
2732 LIRItem* param = new LIRItem(x->argument_at(i), this);
2733 argument_items->append(param);
2734 }
2735 return argument_items;
2736 }
2739 // The invoke with receiver has the following phases:
2740 // a) traverse and load/lock receiver;
2741 // b) traverse all arguments -> item-array (invoke_visit_argument)
2742 // c) push receiver on stack
2743 // d) load each of the items and push on stack
2744 // e) unlock receiver
2745 // f) move receiver into receiver-register %o0
2746 // g) lock result registers and emit call operation
2747 //
2748 // Before issuing a call, we must spill-save all values on the stack
2749 // that are in caller-save registers. "spill-save" moves those registers
2750 // either into a free callee-save register or spills them if no free
2751 // callee-save register is available.
2752 //
2753 // The problem is where to invoke spill-save.
2754 // - if invoked between e) and f), we may lock callee save
2755 // register in "spill-save" that destroys the receiver register
2756 // before f) is executed
2757 // - if we rearrange f) to be earlier, by loading %o0, it
2758 // may destroy a value on the stack that is currently in %o0
2759 // and is waiting to be spilled
2760 // - if we keep the receiver locked while doing spill-save,
2761 // we cannot spill it as it is spill-locked
2762 //
2763 void LIRGenerator::do_Invoke(Invoke* x) {
2764 CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2766 LIR_OprList* arg_list = cc->args();
2767 LIRItemList* args = invoke_visit_arguments(x);
2768 LIR_Opr receiver = LIR_OprFact::illegalOpr;
2770 // setup result register
2771 LIR_Opr result_register = LIR_OprFact::illegalOpr;
2772 if (x->type() != voidType) {
2773 result_register = result_register_for(x->type());
2774 }
2776 CodeEmitInfo* info = state_for(x, x->state());
2778 invoke_load_arguments(x, args, arg_list);
2780 if (x->has_receiver()) {
2781 args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2782 receiver = args->at(0)->result();
2783 }
2785 // emit invoke code
2786 bool optimized = x->target_is_loaded() && x->target_is_final();
2787 assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2789 // JSR 292
2790 // Preserve the SP over MethodHandle call sites.
2791 ciMethod* target = x->target();
2792 bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
2793 target->is_method_handle_intrinsic() ||
2794 target->is_compiled_lambda_form());
2795 if (is_method_handle_invoke) {
2796 info->set_is_method_handle_invoke(true);
2797 __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
2798 }
2800 switch (x->code()) {
2801 case Bytecodes::_invokestatic:
2802 __ call_static(target, result_register,
2803 SharedRuntime::get_resolve_static_call_stub(),
2804 arg_list, info);
2805 break;
2806 case Bytecodes::_invokespecial:
2807 case Bytecodes::_invokevirtual:
2808 case Bytecodes::_invokeinterface:
2809 // for final target we still produce an inline cache, in order
2810 // to be able to call mixed mode
2811 if (x->code() == Bytecodes::_invokespecial || optimized) {
2812 __ call_opt_virtual(target, receiver, result_register,
2813 SharedRuntime::get_resolve_opt_virtual_call_stub(),
2814 arg_list, info);
2815 } else if (x->vtable_index() < 0) {
2816 __ call_icvirtual(target, receiver, result_register,
2817 SharedRuntime::get_resolve_virtual_call_stub(),
2818 arg_list, info);
2819 } else {
2820 int entry_offset = InstanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2821 int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
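// vtable_offset is the byte offset, from the klass, of the Method* slot
// for this vtable index.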
2822 __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2823 }
2824 break;
2825 case Bytecodes::_invokedynamic: {
2826 __ call_dynamic(target, receiver, result_register,
2827 SharedRuntime::get_resolve_static_call_stub(),
2828 arg_list, info);
2829 break;
2830 }
2831 default:
2832 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
2833 break;
2834 }
2836 // JSR 292
2837 // Restore the SP after MethodHandle call sites.
2838 if (is_method_handle_invoke) {
2839 __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
2840 }
2842 if (x->type()->is_float() || x->type()->is_double()) {
2843 // Force rounding of results from non-strictfp when in strictfp
2844 // scope (or when we don't know the strictness of the callee, to
2845 // be safe.)
2846 if (method()->is_strict()) {
2847 if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2848 result_register = round_item(result_register);
2849 }
2850 }
2851 }
2853 if (result_register->is_valid()) {
2854 LIR_Opr result = rlock_result(x);
2855 __ move(result_register, result);
2856 }
2857 }
2860 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
2861 assert(x->number_of_arguments() == 1, "wrong type");
2862 LIRItem value (x->argument_at(0), this);
2863 LIR_Opr reg = rlock_result(x);
2864 value.load_item();
2865 LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
2866 __ move(tmp, reg);
2867 }
2871 // Code for : x->x() {x->cond()} x->y() ? x->tval() : x->fval()
2872 void LIRGenerator::do_IfOp(IfOp* x) {
2873 #ifdef ASSERT
2874 {
2875 ValueTag xtag = x->x()->type()->tag();
2876 ValueTag ttag = x->tval()->type()->tag();
2877 assert(xtag == intTag || xtag == objectTag, "cannot handle others");
2878 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
2879 assert(ttag == x->fval()->type()->tag(), "cannot handle others");
2880 }
2881 #endif
2883 LIRItem left(x->x(), this);
2884 LIRItem right(x->y(), this);
2885 left.load_item();
2886 if (can_inline_as_constant(right.value())) {
2887 right.dont_load_item();
2888 } else {
2889 right.load_item();
2890 }
2892 LIRItem t_val(x->tval(), this);
2893 LIRItem f_val(x->fval(), this);
2894 t_val.dont_load_item();
2895 f_val.dont_load_item();
2896 LIR_Opr reg = rlock_result(x);
2898 __ cmp(lir_cond(x->cond()), left.result(), right.result());
2899 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
2900 }
2902 void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
2903 assert(x->number_of_arguments() == expected_arguments, "wrong type");
2904 LIR_Opr reg = result_register_for(x->type());
2905 __ call_runtime_leaf(routine, getThreadTemp(),
2906 reg, new LIR_OprList());
2907 LIR_Opr result = rlock_result(x);
2908 __ move(reg, result);
2909 }
2911 #ifdef TRACE_HAVE_INTRINSICS
2912 void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
2913 LIR_Opr thread = getThreadPointer();
2914 LIR_Opr osthread = new_pointer_register();
2915 __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
2916 size_t thread_id_size = OSThread::thread_id_size();
2917 if (thread_id_size == (size_t) BytesPerLong) {
2918 LIR_Opr id = new_register(T_LONG);
2919 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id);
2920 __ convert(Bytecodes::_l2i, id, rlock_result(x));
2921 } else if (thread_id_size == (size_t) BytesPerInt) {
2922 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x));
2923 } else {
2924 ShouldNotReachHere();
2925 }
2926 }
2928 void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
2929 CodeEmitInfo* info = state_for(x);
2930 CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
2931 BasicType klass_pointer_type = NOT_LP64(T_INT) LP64_ONLY(T_LONG);
2932 assert(info != NULL, "must have info");
2933 LIRItem arg(x->argument_at(1), this);
2934 arg.load_item();
2935 LIR_Opr klass = new_pointer_register();
2936 __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), klass_pointer_type), klass, info);
2937 LIR_Opr id = new_register(T_LONG);
2938 ByteSize offset = TRACE_ID_OFFSET;
2939 LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
2940 __ move(trace_id_addr, id);
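// Set the low bit in the trace id that is stored back (presumably a
// "used" tag) and clear the low two tag bits in the value returned.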
2941 __ logical_or(id, LIR_OprFact::longConst(0x01l), id);
2942 __ store(id, trace_id_addr);
2943 __ logical_and(id, LIR_OprFact::longConst(~0x3l), id);
2944 __ move(id, rlock_result(x));
2945 }
2946 #endif
2948 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
2949 switch (x->id()) {
2950 case vmIntrinsics::_intBitsToFloat :
2951 case vmIntrinsics::_doubleToRawLongBits :
2952 case vmIntrinsics::_longBitsToDouble :
2953 case vmIntrinsics::_floatToRawIntBits : {
2954 do_FPIntrinsics(x);
2955 break;
2956 }
2958 #ifdef TRACE_HAVE_INTRINSICS
2959 case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break;
2960 case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break;
2961 case vmIntrinsics::_counterTime:
2962 do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), 0, x);
2963 break;
2964 #endif
2966 case vmIntrinsics::_currentTimeMillis:
2967 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), 0, x);
2968 break;
2970 case vmIntrinsics::_nanoTime:
2971 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), 0, x);
2972 break;
2974 case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break;
2975 case vmIntrinsics::_isInstance: do_isInstance(x); break;
2976 case vmIntrinsics::_getClass: do_getClass(x); break;
2977 case vmIntrinsics::_currentThread: do_currentThread(x); break;
2979 case vmIntrinsics::_dlog: // fall through
2980 case vmIntrinsics::_dlog10: // fall through
2981 case vmIntrinsics::_dabs: // fall through
2982 case vmIntrinsics::_dsqrt: // fall through
2983 case vmIntrinsics::_dtan: // fall through
2984 case vmIntrinsics::_dsin : // fall through
2985 case vmIntrinsics::_dcos : // fall through
2986 case vmIntrinsics::_dexp : // fall through
2987 case vmIntrinsics::_dpow : do_MathIntrinsic(x); break;
2988 case vmIntrinsics::_arraycopy: do_ArrayCopy(x); break;
2990 // java.nio.Buffer.checkIndex
2991 case vmIntrinsics::_checkIndex: do_NIOCheckIndex(x); break;
2993 case vmIntrinsics::_compareAndSwapObject:
2994 do_CompareAndSwap(x, objectType);
2995 break;
2996 case vmIntrinsics::_compareAndSwapInt:
2997 do_CompareAndSwap(x, intType);
2998 break;
2999 case vmIntrinsics::_compareAndSwapLong:
3000 do_CompareAndSwap(x, longType);
3001 break;
3003 case vmIntrinsics::_loadFence :
3004 if (os::is_MP()) __ membar_acquire();
3005 break;
3006 case vmIntrinsics::_storeFence:
3007 if (os::is_MP()) __ membar_release();
3008 break;
3009 case vmIntrinsics::_fullFence :
3010 if (os::is_MP()) __ membar();
3011 break;
3013 case vmIntrinsics::_Reference_get:
3014 do_Reference_get(x);
3015 break;
3017 default: ShouldNotReachHere(); break;
3018 }
3019 }
3021 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
3022 // Need recv in a temporary register so it interferes with the other temporaries
3023 LIR_Opr recv = LIR_OprFact::illegalOpr;
3024 LIR_Opr mdo = new_register(T_OBJECT);
3025 // tmp is used to hold the counters on SPARC
3026 LIR_Opr tmp = new_pointer_register();
3027 if (x->recv() != NULL) {
3028 LIRItem value(x->recv(), this);
3029 value.load_item();
3030 recv = new_register(T_OBJECT);
3031 __ move(value.result(), recv);
3032 }
3033 __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3034 }
3036 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3037 // We can safely ignore accessors here, since c2 will inline them anyway;
3038 // accessors are also always mature.
3039 if (!x->inlinee()->is_accessor()) {
3040 CodeEmitInfo* info = state_for(x, x->state(), true);
3041 // Notify the runtime very infrequently only to take care of counter overflows
3042 increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true);
3043 }
3044 }
3046 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
3047 int freq_log;
3048 int level = compilation()->env()->comp_level();
3049 if (level == CompLevel_limited_profile) {
3050 freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3051 } else if (level == CompLevel_full_profile) {
3052 freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3053 } else {
3054 ShouldNotReachHere();
3055 }
3056 // Increment the appropriate invocation/backedge counter and notify the runtime.
3057 increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
3058 }
3060 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3061 ciMethod *method, int frequency,
3062 int bci, bool backedge, bool notify) {
3063 assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^x - 1 or 0");
3064 int level = _compilation->env()->comp_level();
3065 assert(level > CompLevel_simple, "Shouldn't be here");
3067 int offset = -1;
3068 LIR_Opr counter_holder = new_register(T_METADATA);
3069 LIR_Opr meth;
3070 if (level == CompLevel_limited_profile) {
3071 offset = in_bytes(backedge ? Method::backedge_counter_offset() :
3072 Method::invocation_counter_offset());
3073 __ metadata2reg(method->constant_encoding(), counter_holder);
3074 meth = counter_holder;
3075 } else if (level == CompLevel_full_profile) {
3076 offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
3077 MethodData::invocation_counter_offset());
3078 ciMethodData* md = method->method_data_or_null();
3079 assert(md != NULL, "Sanity");
3080 __ metadata2reg(md->constant_encoding(), counter_holder);
3081 meth = new_register(T_METADATA);
3082 __ metadata2reg(method->constant_encoding(), meth);
3083 } else {
3084 ShouldNotReachHere();
3085 }
3086 LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3087 LIR_Opr result = new_register(T_INT);
3088 __ load(counter, result);
3089 __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
3090 __ store(result, counter);
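// With frequency == 2^x - 1, the mask test below succeeds (masked result
// is zero) roughly once every frequency + 1 increments, at which point
// the CounterOverflowStub notifies the runtime.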
3091 if (notify) {
3092 LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
3093 __ logical_and(result, mask, result);
3094 __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3095 // The bci for info can point to the cmp; for ifs we want the if bci
3096 CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3097 __ branch(lir_cond_equal, T_INT, overflow);
3098 __ branch_destination(overflow->continuation());
3099 }
3100 }
3102 void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3103 LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3104 BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3106 if (x->pass_thread()) {
3107 signature->append(T_ADDRESS);
3108 args->append(getThreadPointer());
3109 }
3111 for (int i = 0; i < x->number_of_arguments(); i++) {
3112 Value a = x->argument_at(i);
3113 LIRItem* item = new LIRItem(a, this);
3114 item->load_item();
3115 args->append(item->result());
3116 signature->append(as_BasicType(a->type()));
3117 }
3119 LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
3120 if (x->type() == voidType) {
3121 set_no_result(x);
3122 } else {
3123 __ move(result, rlock_result(x));
3124 }
3125 }
3127 void LIRGenerator::do_Assert(Assert *x) {
3128 #ifdef ASSERT
3129 ValueTag tag = x->x()->type()->tag();
3130 If::Condition cond = x->cond();
3132 LIRItem xitem(x->x(), this);
3133 LIRItem yitem(x->y(), this);
3134 LIRItem* xin = &xitem;
3135 LIRItem* yin = &yitem;
3137 assert(tag == intTag, "Only integer assertions are valid!");
3139 xin->load_item();
3140 yin->dont_load_item();
3142 set_no_result(x);
3144 LIR_Opr left = xin->result();
3145 LIR_Opr right = yin->result();
3147 __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
3148 #endif
3149 }
3152 void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
3155 Instruction *a = x->x();
3156 Instruction *b = x->y();
3157 if (!a || StressRangeCheckElimination) {
3158 assert(!b || StressRangeCheckElimination, "B must also be null");
3160 CodeEmitInfo *info = state_for(x, x->state());
3161 CodeStub* stub = new PredicateFailedStub(info);
3163 __ jump(stub);
3164 } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
3165 int a_int = a->type()->as_IntConstant()->value();
3166 int b_int = b->type()->as_IntConstant()->value();
3168 bool ok = false;
3170 switch (x->cond()) {
3171 case Instruction::eql: ok = (a_int == b_int); break;
3172 case Instruction::neq: ok = (a_int != b_int); break;
3173 case Instruction::lss: ok = (a_int < b_int); break;
3174 case Instruction::leq: ok = (a_int <= b_int); break;
3175 case Instruction::gtr: ok = (a_int > b_int); break;
3176 case Instruction::geq: ok = (a_int >= b_int); break;
3177 case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
3178 case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
3179 default: ShouldNotReachHere();
3180 }
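// If the condition holds for the two constants, the predicate always
// fails, so emit an unconditional jump to the PredicateFailedStub.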
3182 if (ok) {
3184 CodeEmitInfo *info = state_for(x, x->state());
3185 CodeStub* stub = new PredicateFailedStub(info);
3187 __ jump(stub);
3188 }
3189 } else {
3191 ValueTag tag = x->x()->type()->tag();
3192 If::Condition cond = x->cond();
3193 LIRItem xitem(x->x(), this);
3194 LIRItem yitem(x->y(), this);
3195 LIRItem* xin = &xitem;
3196 LIRItem* yin = &yitem;
3198 assert(tag == intTag, "Only integer deoptimizations are valid!");
3200 xin->load_item();
3201 yin->dont_load_item();
3202 set_no_result(x);
3204 LIR_Opr left = xin->result();
3205 LIR_Opr right = yin->result();
3207 CodeEmitInfo *info = state_for(x, x->state());
3208 CodeStub* stub = new PredicateFailedStub(info);
3210 __ cmp(lir_cond(cond), left, right);
3211 __ branch(lir_cond(cond), right->type(), stub);
3212 }
3213 }
3216 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
3217 LIRItemList args(1);
3218 LIRItem value(arg1, this);
3219 args.append(&value);
3220 BasicTypeList signature;
3221 signature.append(as_BasicType(arg1->type()));
3223 return call_runtime(&signature, &args, entry, result_type, info);
3224 }
3227 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
3228 LIRItemList args(2);
3229 LIRItem value1(arg1, this);
3230 LIRItem value2(arg2, this);
3231 args.append(&value1);
3232 args.append(&value2);
3233 BasicTypeList signature;
3234 signature.append(as_BasicType(arg1->type()));
3235 signature.append(as_BasicType(arg2->type()));
3237 return call_runtime(&signature, &args, entry, result_type, info);
3238 }
3241 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
3242 address entry, ValueType* result_type, CodeEmitInfo* info) {
3243 // get a result register
3244 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3245 LIR_Opr result = LIR_OprFact::illegalOpr;
3246 if (result_type->tag() != voidTag) {
3247 result = new_register(result_type);
3248 phys_reg = result_register_for(result_type);
3249 }
3251 // move the arguments into the correct location
3252 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3253 assert(cc->length() == args->length(), "argument mismatch");
3254 for (int i = 0; i < args->length(); i++) {
3255 LIR_Opr arg = args->at(i);
3256 LIR_Opr loc = cc->at(i);
3257 if (loc->is_register()) {
3258 __ move(arg, loc);
3259 } else {
3260 LIR_Address* addr = loc->as_address_ptr();
3261 // if (!can_store_as_constant(arg)) {
3262 // LIR_Opr tmp = new_register(arg->type());
3263 // __ move(arg, tmp);
3264 // arg = tmp;
3265 // }
3266 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3267 __ unaligned_move(arg, addr);
3268 } else {
3269 __ move(arg, addr);
3270 }
3271 }
3272 }
3274 if (info) {
3275 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3276 } else {
3277 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3278 }
3279 if (result->is_valid()) {
3280 __ move(phys_reg, result);
3281 }
3282 return result;
3283 }
3286 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
3287 address entry, ValueType* result_type, CodeEmitInfo* info) {
3288 // get a result register
3289 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3290 LIR_Opr result = LIR_OprFact::illegalOpr;
3291 if (result_type->tag() != voidTag) {
3292 result = new_register(result_type);
3293 phys_reg = result_register_for(result_type);
3294 }
3296 // move the arguments into the correct location
3297 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3299 assert(cc->length() == args->length(), "argument mismatch");
3300 for (int i = 0; i < args->length(); i++) {
3301 LIRItem* arg = args->at(i);
3302 LIR_Opr loc = cc->at(i);
3303 if (loc->is_register()) {
3304 arg->load_item_force(loc);
3305 } else {
3306 LIR_Address* addr = loc->as_address_ptr();
3307 arg->load_for_store(addr->type());
3308 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3309 __ unaligned_move(arg->result(), addr);
3310 } else {
3311 __ move(arg->result(), addr);
3312 }
3313 }
3314 }
3316 if (info) {
3317 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3318 } else {
3319 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3320 }
3321 if (result->is_valid()) {
3322 __ move(phys_reg, result);
3323 }
3324 return result;
3325 }
3327 void LIRGenerator::do_MemBar(MemBar* x) {
3328 if (os::is_MP()) {
3329 LIR_Code code = x->code();
3330 switch (code) {
3331 case lir_membar_acquire : __ membar_acquire(); break;
3332 case lir_membar_release : __ membar_release(); break;
3333 case lir_membar : __ membar(); break;
3334 case lir_membar_loadload : __ membar_loadload(); break;
3335 case lir_membar_storestore: __ membar_storestore(); break;
3336 case lir_membar_loadstore : __ membar_loadstore(); break;
3337 case lir_membar_storeload : __ membar_storeload(); break;
3338 default : ShouldNotReachHere(); break;
3339 }
3340 }
3341 }