Mon, 20 Aug 2012 09:58:58 -0700
7190310: Inlining WeakReference.get(), and hoisting $referent may lead to non-terminating loops
Summary: In C2, add a software membar after loads from the Reference.referent field to prevent commoning of loads across safepoints, since the GC can change the field's value. In C1, always generate the Reference.get() intrinsic.
Reviewed-by: roland, twisti, dholmes, johnc
1 /*
2 * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "c1/c1_Compilation.hpp"
27 #include "c1/c1_FrameMap.hpp"
28 #include "c1/c1_Instruction.hpp"
29 #include "c1/c1_LIRAssembler.hpp"
30 #include "c1/c1_LIRGenerator.hpp"
31 #include "c1/c1_ValueStack.hpp"
32 #include "ci/ciArrayKlass.hpp"
33 #include "ci/ciCPCache.hpp"
34 #include "ci/ciInstance.hpp"
35 #include "runtime/sharedRuntime.hpp"
36 #include "runtime/stubRoutines.hpp"
37 #include "utilities/bitMap.inline.hpp"
38 #ifndef SERIALGC
39 #include "gc_implementation/g1/heapRegion.hpp"
40 #endif
42 #ifdef ASSERT
43 #define __ gen()->lir(__FILE__, __LINE__)->
44 #else
45 #define __ gen()->lir()->
46 #endif
48 // TODO: ARM - Use some recognizable constant which still fits architectural constraints
49 #ifdef ARM
50 #define PATCHED_ADDR (204)
51 #else
52 #define PATCHED_ADDR (max_jint)
53 #endif
55 void PhiResolverState::reset(int max_vregs) {
56 // Initialize array sizes
57 _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
58 _virtual_operands.trunc_to(0);
59 _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
60 _other_operands.trunc_to(0);
61 _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
62 _vreg_table.trunc_to(0);
63 }
67 //--------------------------------------------------------------
68 // PhiResolver
70 // Resolves cycles:
71 //
72 // r1 := r2 becomes temp := r1
73 // r2 := r1 r1 := r2
74 // r2 := temp
75 // and orders moves:
76 //
77 // r2 := r3 becomes r1 := r2
78 // r1 := r2 r2 := r3
80 PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
81 : _gen(gen)
82 , _state(gen->resolver_state())
83 , _temp(LIR_OprFact::illegalOpr)
84 {
85 // reinitialize the shared state arrays
86 _state.reset(max_vregs);
87 }
90 void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
91 assert(src->is_valid(), "");
92 assert(dest->is_valid(), "");
93 __ move(src, dest);
94 }
97 void PhiResolver::move_temp_to(LIR_Opr dest) {
98 assert(_temp->is_valid(), "");
99 emit_move(_temp, dest);
100 NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
101 }
104 void PhiResolver::move_to_temp(LIR_Opr src) {
105 assert(_temp->is_illegal(), "");
106 _temp = _gen->new_register(src->type());
107 emit_move(src, _temp);
108 }
111 // Traverse assignment graph in depth first order and generate moves in post order
112 // ie. two assignments: b := c, a := b start with node c:
113 // Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
114 // Generates moves in this order: move b to a and move c to b
115 // ie. cycle a := b, b := a start with node a
116 // Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
117 // Generates moves in this order: move b to temp, move a to b, move temp to a
118 void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
119 if (!dest->visited()) {
120 dest->set_visited();
121 for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
122 move(dest, dest->destination_at(i));
123 }
124 } else if (!dest->start_node()) {
125 // cycle in graph detected
126 assert(_loop == NULL, "only one loop valid!");
127 _loop = dest;
128 move_to_temp(src->operand());
129 return;
130 } // else dest is a start node
132 if (!dest->assigned()) {
133 if (_loop == dest) {
134 move_temp_to(dest->operand());
135 dest->set_assigned();
136 } else if (src != NULL) {
137 emit_move(src->operand(), dest->operand());
138 dest->set_assigned();
139 }
140 }
141 }
144 PhiResolver::~PhiResolver() {
145 int i;
146 // resolve any cycles in moves from and to virtual registers
147 for (i = virtual_operands().length() - 1; i >= 0; i --) {
148 ResolveNode* node = virtual_operands()[i];
149 if (!node->visited()) {
150 _loop = NULL;
151 move(NULL, node);
152 node->set_start_node();
153 assert(_temp->is_illegal(), "move_temp_to() call missing");
154 }
155 }
157 // generate moves from non-virtual registers to arbitrary destinations
158 for (i = other_operands().length() - 1; i >= 0; i --) {
159 ResolveNode* node = other_operands()[i];
160 for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
161 emit_move(node->operand(), node->destination_at(j)->operand());
162 }
163 }
164 }
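// Illustrative sketch, not part of this changeset: registering the swap
// r1 := r2, r2 := r1 with the resolver. The generator pointer, operands and
// max_vregs value are hypothetical. Emission happens in ~PhiResolver(),
// which detects the cycle and routes one value through a temp, producing
// temp := r1; r1 := r2; r2 := temp.
#ifdef ILLUSTRATION_ONLY
static void example_resolve_swap(LIRGenerator* gen, LIR_Opr r1, LIR_Opr r2, int max_vregs) {
  PhiResolver resolver(gen, max_vregs);
  resolver.move(r2, r1);  // request r1 := r2
  resolver.move(r1, r2);  // request r2 := r1
}  // the destructor emits the cycle-free move sequence here
#endif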
167 ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
168 ResolveNode* node;
169 if (opr->is_virtual()) {
170 int vreg_num = opr->vreg_number();
171 node = vreg_table().at_grow(vreg_num, NULL);
172 assert(node == NULL || node->operand() == opr, "");
173 if (node == NULL) {
174 node = new ResolveNode(opr);
175 vreg_table()[vreg_num] = node;
176 }
177 // Make sure that all virtual operands show up in the list when
178 // they are used as the source of a move.
179 if (source && !virtual_operands().contains(node)) {
180 virtual_operands().append(node);
181 }
182 } else {
183 assert(source, "");
184 node = new ResolveNode(opr);
185 other_operands().append(node);
186 }
187 return node;
188 }
191 void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
192 assert(dest->is_virtual(), "");
193 // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
194 assert(src->is_valid(), "");
195 assert(dest->is_valid(), "");
196 ResolveNode* source = source_node(src);
197 source->append(destination_node(dest));
198 }
201 //--------------------------------------------------------------
202 // LIRItem
204 void LIRItem::set_result(LIR_Opr opr) {
205 assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
206 value()->set_operand(opr);
208 if (opr->is_virtual()) {
209 _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
210 }
212 _result = opr;
213 }
215 void LIRItem::load_item() {
216 if (result()->is_illegal()) {
217 // update the item's result
218 _result = value()->operand();
219 }
220 if (!result()->is_register()) {
221 LIR_Opr reg = _gen->new_register(value()->type());
222 __ move(result(), reg);
223 if (result()->is_constant()) {
224 _result = reg;
225 } else {
226 set_result(reg);
227 }
228 }
229 }
232 void LIRItem::load_for_store(BasicType type) {
233 if (_gen->can_store_as_constant(value(), type)) {
234 _result = value()->operand();
235 if (!_result->is_constant()) {
236 _result = LIR_OprFact::value_type(value()->type());
237 }
238 } else if (type == T_BYTE || type == T_BOOLEAN) {
239 load_byte_item();
240 } else {
241 load_item();
242 }
243 }
245 void LIRItem::load_item_force(LIR_Opr reg) {
246 LIR_Opr r = result();
247 if (r != reg) {
248 #if !defined(ARM) && !defined(E500V2)
249 if (r->type() != reg->type()) {
250 // moves between different types need an intervening spill slot
251 r = _gen->force_to_spill(r, reg->type());
252 }
253 #endif
254 __ move(r, reg);
255 _result = reg;
256 }
257 }
259 ciObject* LIRItem::get_jobject_constant() const {
260 ObjectType* oc = type()->as_ObjectType();
261 if (oc) {
262 return oc->constant_value();
263 }
264 return NULL;
265 }
268 jint LIRItem::get_jint_constant() const {
269 assert(is_constant() && value() != NULL, "");
270 assert(type()->as_IntConstant() != NULL, "type check");
271 return type()->as_IntConstant()->value();
272 }
275 jint LIRItem::get_address_constant() const {
276 assert(is_constant() && value() != NULL, "");
277 assert(type()->as_AddressConstant() != NULL, "type check");
278 return type()->as_AddressConstant()->value();
279 }
282 jfloat LIRItem::get_jfloat_constant() const {
283 assert(is_constant() && value() != NULL, "");
284 assert(type()->as_FloatConstant() != NULL, "type check");
285 return type()->as_FloatConstant()->value();
286 }
289 jdouble LIRItem::get_jdouble_constant() const {
290 assert(is_constant() && value() != NULL, "");
291 assert(type()->as_DoubleConstant() != NULL, "type check");
292 return type()->as_DoubleConstant()->value();
293 }
296 jlong LIRItem::get_jlong_constant() const {
297 assert(is_constant() && value() != NULL, "");
298 assert(type()->as_LongConstant() != NULL, "type check");
299 return type()->as_LongConstant()->value();
300 }
304 //--------------------------------------------------------------
307 void LIRGenerator::init() {
308 _bs = Universe::heap()->barrier_set();
309 }
312 void LIRGenerator::block_do_prolog(BlockBegin* block) {
313 #ifndef PRODUCT
314 if (PrintIRWithLIR) {
315 block->print();
316 }
317 #endif
319 // set up the list of LIR instructions
320 assert(block->lir() == NULL, "LIR list already computed for this block");
321 _lir = new LIR_List(compilation(), block);
322 block->set_lir(_lir);
324 __ branch_destination(block->label());
326 if (LIRTraceExecution &&
327 Compilation::current()->hir()->start()->block_id() != block->block_id() &&
328 !block->is_set(BlockBegin::exception_entry_flag)) {
329 assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
330 trace_block_entry(block);
331 }
332 }
335 void LIRGenerator::block_do_epilog(BlockBegin* block) {
336 #ifndef PRODUCT
337 if (PrintIRWithLIR) {
338 tty->cr();
339 }
340 #endif
342 // LIR_Opr for unpinned constants shouldn't be referenced by other
343 // blocks so clear them out after processing the block.
344 for (int i = 0; i < _unpinned_constants.length(); i++) {
345 _unpinned_constants.at(i)->clear_operand();
346 }
347 _unpinned_constants.trunc_to(0);
349 // clear out any registers for other local constants
350 _constants.trunc_to(0);
351 _reg_for_constants.trunc_to(0);
352 }
355 void LIRGenerator::block_do(BlockBegin* block) {
356 CHECK_BAILOUT();
358 block_do_prolog(block);
359 set_block(block);
361 for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
362 if (instr->is_pinned()) do_root(instr);
363 }
365 set_block(NULL);
366 block_do_epilog(block);
367 }
370 //-------------------------LIRGenerator-----------------------------
372 // This is where the tree-walk starts; instr must be a root.
373 void LIRGenerator::do_root(Value instr) {
374 CHECK_BAILOUT();
376 InstructionMark im(compilation(), instr);
378 assert(instr->is_pinned(), "use only with roots");
379 assert(instr->subst() == instr, "shouldn't have missed substitution");
381 instr->visit(this);
383 assert(!instr->has_uses() || instr->operand()->is_valid() ||
384 instr->as_Constant() != NULL || bailed_out(), "invalid item set");
385 }
388 // This is called for each node in the tree; the walk stops when a root is reached
389 void LIRGenerator::walk(Value instr) {
390 InstructionMark im(compilation(), instr);
391 // stop the walk when we encounter a root
392 if ((instr->is_pinned() && instr->as_Phi() == NULL) || instr->operand()->is_valid()) {
393 assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
394 } else {
395 assert(instr->subst() == instr, "shouldn't have missed substitution");
396 instr->visit(this);
397 // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
398 }
399 }
402 CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
403 assert(state != NULL, "state must be defined");
405 ValueStack* s = state;
406 for_each_state(s) {
407 if (s->kind() == ValueStack::EmptyExceptionState) {
408 assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
409 continue;
410 }
412 int index;
413 Value value;
414 for_each_stack_value(s, index, value) {
415 assert(value->subst() == value, "missed substitution");
416 if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
417 walk(value);
418 assert(value->operand()->is_valid(), "must be evaluated now");
419 }
420 }
422 int bci = s->bci();
423 IRScope* scope = s->scope();
424 ciMethod* method = scope->method();
426 MethodLivenessResult liveness = method->liveness_at_bci(bci);
427 if (bci == SynchronizationEntryBCI) {
428 if (x->as_ExceptionObject() || x->as_Throw()) {
429 // all locals are dead on exit from the synthetic unlocker
430 liveness.clear();
431 } else {
432 assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
433 }
434 }
435 if (!liveness.is_valid()) {
436 // Degenerate or breakpointed method.
437 bailout("Degenerate or breakpointed method");
438 } else {
439 assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
440 for_each_local_value(s, index, value) {
441 assert(value->subst() == value, "missed substitution");
442 if (liveness.at(index) && !value->type()->is_illegal()) {
443 if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
444 walk(value);
445 assert(value->operand()->is_valid(), "must be evaluated now");
446 }
447 } else {
448 // NULL out this local so that linear scan can assume that all non-NULL values are live.
449 s->invalidate_local(index);
450 }
451 }
452 }
453 }
455 return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers());
456 }
459 CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
460 return state_for(x, x->exception_state());
461 }
464 void LIRGenerator::jobject2reg_with_patching(LIR_Opr r, ciObject* obj, CodeEmitInfo* info) {
465 if (!obj->is_loaded() || PatchALot) {
466 assert(info != NULL, "info must be set if class is not loaded");
467 __ oop2reg_patch(NULL, r, info);
468 } else {
469 // no patching needed
470 __ oop2reg(obj->constant_encoding(), r);
471 }
472 }
475 void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
476 CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
477 CodeStub* stub = new RangeCheckStub(range_check_info, index);
478 if (index->is_constant()) {
479 cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
480 index->as_jint(), null_check_info);
481 __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
482 } else {
483 cmp_reg_mem(lir_cond_aboveEqual, index, array,
484 arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
485 __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
486 }
487 }
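// Illustrative note, not part of this changeset: both branches above use an
// unsigned condition, so a single compare covers both bounds; a negative
// index, viewed as a huge unsigned value, also reaches the stub:
//   if ((juint)index >= (juint)array.length) goto RangeCheckStub;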
490 void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
491 CodeStub* stub = new RangeCheckStub(info, index, true);
492 if (index->is_constant()) {
493 cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
494 __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
495 } else {
496 cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
497 java_nio_Buffer::limit_offset(), T_INT, info);
498 __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
499 }
500 __ move(index, result);
501 }
505 void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
506 LIR_Opr result_op = result;
507 LIR_Opr left_op = left;
508 LIR_Opr right_op = right;
510 if (TwoOperandLIRForm && left_op != result_op) {
511 assert(right_op != result_op, "malformed");
512 __ move(left_op, result_op);
513 left_op = result_op;
514 }
516 switch(code) {
517 case Bytecodes::_dadd:
518 case Bytecodes::_fadd:
519 case Bytecodes::_ladd:
520 case Bytecodes::_iadd: __ add(left_op, right_op, result_op); break;
521 case Bytecodes::_fmul:
522 case Bytecodes::_lmul: __ mul(left_op, right_op, result_op); break;
524 case Bytecodes::_dmul:
525 {
526 if (is_strictfp) {
527 __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
528 } else {
529 __ mul(left_op, right_op, result_op); break;
530 }
531 }
532 break;
534 case Bytecodes::_imul:
535 {
536 bool did_strength_reduce = false;
538 if (right->is_constant()) {
539 int c = right->as_jint();
540 if (is_power_of_2(c)) {
541 // do not need tmp here
542 __ shift_left(left_op, exact_log2(c), result_op);
543 did_strength_reduce = true;
544 } else {
545 did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
546 }
547 }
548 // we couldn't strength reduce so just emit the multiply
549 if (!did_strength_reduce) {
550 __ mul(left_op, right_op, result_op);
551 }
552 }
553 break;
555 case Bytecodes::_dsub:
556 case Bytecodes::_fsub:
557 case Bytecodes::_lsub:
558 case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;
560 case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
561 // ldiv and lrem are implemented with a direct runtime call
563 case Bytecodes::_ddiv:
564 {
565 if (is_strictfp) {
566 __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
567 } else {
568 __ div (left_op, right_op, result_op); break;
569 }
570 }
571 break;
573 case Bytecodes::_drem:
574 case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;
576 default: ShouldNotReachHere();
577 }
578 }
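// Illustrative sketch, not part of this changeset: the strength reduction in
// the _imul case above, spelled out for a multiply by 8. The generator
// accessor and operands are hypothetical.
#ifdef ILLUSTRATION_ONLY
static void example_imul_by_8(LIRGenerator* gen, LIR_Opr left, LIR_Opr result) {
  const int c = 8;
  assert(is_power_of_2(c), "example relies on a power-of-two constant");
  gen->lir()->shift_left(left, exact_log2(c), result);  // i * 8 ==> i << 3
}
#endif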
581 void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
582 arithmetic_op(code, result, left, right, false, tmp);
583 }
586 void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
587 arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
588 }
591 void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
592 arithmetic_op(code, result, left, right, is_strictfp, tmp);
593 }
596 void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
597 if (TwoOperandLIRForm && value != result_op) {
598 assert(count != result_op, "malformed");
599 __ move(value, result_op);
600 value = result_op;
601 }
603 assert(count->is_constant() || count->is_register(), "must be");
604 switch(code) {
605 case Bytecodes::_ishl:
606 case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
607 case Bytecodes::_ishr:
608 case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
609 case Bytecodes::_iushr:
610 case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
611 default: ShouldNotReachHere();
612 }
613 }
616 void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
617 if (TwoOperandLIRForm && left_op != result_op) {
618 assert(right_op != result_op, "malformed");
619 __ move(left_op, result_op);
620 left_op = result_op;
621 }
623 switch(code) {
624 case Bytecodes::_iand:
625 case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;
627 case Bytecodes::_ior:
628 case Bytecodes::_lor: __ logical_or(left_op, right_op, result_op); break;
630 case Bytecodes::_ixor:
631 case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;
633 default: ShouldNotReachHere();
634 }
635 }
638 void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
639 if (!GenerateSynchronizationCode) return;
640 // for slow path, use debug info for state after successful locking
641 CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
642 __ load_stack_address_monitor(monitor_no, lock);
643 // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
644 __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
645 }
648 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
649 if (!GenerateSynchronizationCode) return;
650 // setup registers
651 LIR_Opr hdr = lock;
652 lock = new_hdr;
653 CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
654 __ load_stack_address_monitor(monitor_no, lock);
655 __ unlock_object(hdr, object, lock, scratch, slow_path);
656 }
659 void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
660 jobject2reg_with_patching(klass_reg, klass, info);
661 // If klass is not loaded we do not know if the klass has finalizers:
662 if (UseFastNewInstance && klass->is_loaded()
663 && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
665 Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;
667 CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
669 assert(klass->is_loaded(), "must be loaded");
670 // allocate space for instance
671 assert(klass->size_helper() >= 0, "illegal instance size");
672 const int instance_size = align_object_size(klass->size_helper());
673 __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
674 oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
675 } else {
676 CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
677 __ branch(lir_cond_always, T_ILLEGAL, slow_path);
678 __ branch_destination(slow_path->continuation());
679 }
680 }
683 static bool is_constant_zero(Instruction* inst) {
684 IntConstant* c = inst->type()->as_IntConstant();
685 if (c) {
686 return (c->value() == 0);
687 }
688 return false;
689 }
692 static bool positive_constant(Instruction* inst) {
693 IntConstant* c = inst->type()->as_IntConstant();
694 if (c) {
695 return (c->value() >= 0);
696 }
697 return false;
698 }
701 static ciArrayKlass* as_array_klass(ciType* type) {
702 if (type != NULL && type->is_array_klass() && type->is_loaded()) {
703 return (ciArrayKlass*)type;
704 } else {
705 return NULL;
706 }
707 }
709 static Value maxvalue(IfOp* ifop) {
710 switch (ifop->cond()) {
711 case If::eql: return NULL;
712 case If::neq: return NULL;
713 case If::lss: // x < y ? x : y
714 case If::leq: // x <= y ? x : y
715 if (ifop->x() == ifop->tval() &&
716 ifop->y() == ifop->fval()) return ifop->y();
717 return NULL;
719 case If::gtr: // x > y ? y : x
720 case If::geq: // x >= y ? y : x
721 if (ifop->x() == ifop->tval() &&
722 ifop->y() == ifop->fval()) return ifop->y();
723 return NULL;
725 }
726 }
728 static ciType* phi_declared_type(Phi* phi) {
729 ciType* t = phi->operand_at(0)->declared_type();
730 if (t == NULL) {
731 return NULL;
732 }
733 for(int i = 1; i < phi->operand_count(); i++) {
734 if (t != phi->operand_at(i)->declared_type()) {
735 return NULL;
736 }
737 }
738 return t;
739 }
741 void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
742 Instruction* src = x->argument_at(0);
743 Instruction* src_pos = x->argument_at(1);
744 Instruction* dst = x->argument_at(2);
745 Instruction* dst_pos = x->argument_at(3);
746 Instruction* length = x->argument_at(4);
748 // first try to identify the likely type of the arrays involved
749 ciArrayKlass* expected_type = NULL;
750 bool is_exact = false, src_objarray = false, dst_objarray = false;
751 {
752 ciArrayKlass* src_exact_type = as_array_klass(src->exact_type());
753 ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
754 Phi* phi;
755 if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
756 src_declared_type = as_array_klass(phi_declared_type(phi));
757 }
758 ciArrayKlass* dst_exact_type = as_array_klass(dst->exact_type());
759 ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
760 if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
761 dst_declared_type = as_array_klass(phi_declared_type(phi));
762 }
764 if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
765 // the types exactly match so the type is fully known
766 is_exact = true;
767 expected_type = src_exact_type;
768 } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
769 ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
770 ciArrayKlass* src_type = NULL;
771 if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
772 src_type = (ciArrayKlass*) src_exact_type;
773 } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
774 src_type = (ciArrayKlass*) src_declared_type;
775 }
776 if (src_type != NULL) {
777 if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
778 is_exact = true;
779 expected_type = dst_type;
780 }
781 }
782 }
783 // at least pass along a good guess
784 if (expected_type == NULL) expected_type = dst_exact_type;
785 if (expected_type == NULL) expected_type = src_declared_type;
786 if (expected_type == NULL) expected_type = dst_declared_type;
788 src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
789 dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
790 }
792 // if a probable array type has been identified, figure out if any
793 // of the required checks for a fast case can be elided.
794 int flags = LIR_OpArrayCopy::all_flags;
796 if (!src_objarray)
797 flags &= ~LIR_OpArrayCopy::src_objarray;
798 if (!dst_objarray)
799 flags &= ~LIR_OpArrayCopy::dst_objarray;
801 if (!x->arg_needs_null_check(0))
802 flags &= ~LIR_OpArrayCopy::src_null_check;
803 if (!x->arg_needs_null_check(2))
804 flags &= ~LIR_OpArrayCopy::dst_null_check;
807 if (expected_type != NULL) {
808 Value length_limit = NULL;
810 IfOp* ifop = length->as_IfOp();
811 if (ifop != NULL) {
812 // look for expressions like min(v, a.length) which end up as
813 // x > y ? y : x or x >= y ? y : x
814 if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
815 ifop->x() == ifop->fval() &&
816 ifop->y() == ifop->tval()) {
817 length_limit = ifop->y();
818 }
819 }
821 // try to skip null checks and range checks
822 NewArray* src_array = src->as_NewArray();
823 if (src_array != NULL) {
824 flags &= ~LIR_OpArrayCopy::src_null_check;
825 if (length_limit != NULL &&
826 src_array->length() == length_limit &&
827 is_constant_zero(src_pos)) {
828 flags &= ~LIR_OpArrayCopy::src_range_check;
829 }
830 }
832 NewArray* dst_array = dst->as_NewArray();
833 if (dst_array != NULL) {
834 flags &= ~LIR_OpArrayCopy::dst_null_check;
835 if (length_limit != NULL &&
836 dst_array->length() == length_limit &&
837 is_constant_zero(dst_pos)) {
838 flags &= ~LIR_OpArrayCopy::dst_range_check;
839 }
840 }
842 // check from incoming constant values
843 if (positive_constant(src_pos))
844 flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
845 if (positive_constant(dst_pos))
846 flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
847 if (positive_constant(length))
848 flags &= ~LIR_OpArrayCopy::length_positive_check;
850 // see if the range check can be elided, which might also imply
851 // that src or dst is non-null.
852 ArrayLength* al = length->as_ArrayLength();
853 if (al != NULL) {
854 if (al->array() == src) {
855 // it's the length of the source array
856 flags &= ~LIR_OpArrayCopy::length_positive_check;
857 flags &= ~LIR_OpArrayCopy::src_null_check;
858 if (is_constant_zero(src_pos))
859 flags &= ~LIR_OpArrayCopy::src_range_check;
860 }
861 if (al->array() == dst) {
862 // it's the length of the destination array
863 flags &= ~LIR_OpArrayCopy::length_positive_check;
864 flags &= ~LIR_OpArrayCopy::dst_null_check;
865 if (is_constant_zero(dst_pos))
866 flags &= ~LIR_OpArrayCopy::dst_range_check;
867 }
868 }
869 if (is_exact) {
870 flags &= ~LIR_OpArrayCopy::type_check;
871 }
872 }
874 IntConstant* src_int = src_pos->type()->as_IntConstant();
875 IntConstant* dst_int = dst_pos->type()->as_IntConstant();
876 if (src_int && dst_int) {
877 int s_offs = src_int->value();
878 int d_offs = dst_int->value();
879 if (src_int->value() >= dst_int->value()) {
880 flags &= ~LIR_OpArrayCopy::overlapping;
881 }
882 if (expected_type != NULL) {
883 BasicType t = expected_type->element_type()->basic_type();
884 int element_size = type2aelembytes(t);
885 if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
886 ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
887 flags &= ~LIR_OpArrayCopy::unaligned;
888 }
889 }
890 } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
891 // src and dst positions are the same, or dst position is zero, so assume
892 // a non-overlapping copy.
893 flags &= ~LIR_OpArrayCopy::overlapping;
894 }
896 if (src == dst) {
897 // moving within a single array so no type checks are needed
898 if (flags & LIR_OpArrayCopy::type_check) {
899 flags &= ~LIR_OpArrayCopy::type_check;
900 }
901 }
902 *flagsp = flags;
903 *expected_typep = (ciArrayKlass*)expected_type;
904 }
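// Illustrative note, not part of this changeset: for a shift within one
// array, e.g.
//   System.arraycopy(a, 0, a, 1, len);
// src == dst clears type_check, the constant positions clear both
// pos_positive checks, and s_offs (0) < d_offs (1) leaves the overlapping
// flag set, so the runtime must perform a backward copy.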
907 LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
908 assert(opr->is_register(), "why spill if item is not register?");
910 if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
911 LIR_Opr result = new_register(T_FLOAT);
912 set_vreg_flag(result, must_start_in_memory);
913 assert(opr->is_register(), "only a register can be spilled");
914 assert(opr->value_type()->is_float(), "rounding only for floats available");
915 __ roundfp(opr, LIR_OprFact::illegalOpr, result);
916 return result;
917 }
918 return opr;
919 }
922 LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
923 assert(type2size[t] == type2size[value->type()], "size mismatch");
924 if (!value->is_register()) {
925 // force into a register
926 LIR_Opr r = new_register(value->type());
927 __ move(value, r);
928 value = r;
929 }
931 // create a spill location
932 LIR_Opr tmp = new_register(t);
933 set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);
935 // move from register to spill
936 __ move(value, tmp);
937 return tmp;
938 }
940 void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
941 if (if_instr->should_profile()) {
942 ciMethod* method = if_instr->profiled_method();
943 assert(method != NULL, "method should be set if branch is profiled");
944 ciMethodData* md = method->method_data_or_null();
945 assert(md != NULL, "Sanity");
946 ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
947 assert(data != NULL, "must have profiling data");
948 assert(data->is_BranchData(), "need BranchData for two-way branches");
949 int taken_count_offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
950 int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
951 if (if_instr->is_swapped()) {
952 int t = taken_count_offset;
953 taken_count_offset = not_taken_count_offset;
954 not_taken_count_offset = t;
955 }
957 LIR_Opr md_reg = new_register(T_OBJECT);
958 __ oop2reg(md->constant_encoding(), md_reg);
960 LIR_Opr data_offset_reg = new_pointer_register();
961 __ cmove(lir_cond(cond),
962 LIR_OprFact::intptrConst(taken_count_offset),
963 LIR_OprFact::intptrConst(not_taken_count_offset),
964 data_offset_reg, as_BasicType(if_instr->x()->type()));
966 // MDO cells are intptr_t, so the data_reg width is arch-dependent.
967 LIR_Opr data_reg = new_pointer_register();
968 LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
969 __ move(data_addr, data_reg);
970 // Use leal instead of add to avoid destroying condition codes on x86
971 LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
972 __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
973 __ move(data_reg, data_addr);
974 }
975 }
977 // Phi technique:
978 // This is about passing live values from one basic block to the other.
979 // In code generated with Java it is rather rare that more than one
980 // value is on the stack from one basic block to the other.
981 // We optimize our technique for efficient passing of one value
982 // (of type long, int, double..) but it can be extended.
983 // When entering or leaving a basic block, all registers and all spill
984 // slots are released and empty. We use the released registers
985 // and spill slots to pass the live values from one block
986 // to the other. The topmost value, i.e., the value on TOS of expression
987 // stack is passed in registers. All other values are stored in spilling
988 // area. Every Phi has an index which designates its spill slot
989 // At exit of a basic block, we fill the register(s) and spill slots.
990 // At entry of a basic block, the block_prolog sets up the content of phi nodes
991 // and locks necessary registers and spilling slots.
994 // move current value to referenced phi function
995 void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
996 Phi* phi = sux_val->as_Phi();
997 // with inlining, cur_val can be null even though phi is non-null
998 if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
999 LIR_Opr operand = cur_val->operand();
1000 if (cur_val->operand()->is_illegal()) {
1001 assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
1002 "these can be produced lazily");
1003 operand = operand_for_instruction(cur_val);
1004 }
1005 resolver->move(operand, operand_for_instruction(phi));
1006 }
1007 }
1010 // Moves all stack values into their PHI position
1011 void LIRGenerator::move_to_phi(ValueStack* cur_state) {
1012 BlockBegin* bb = block();
1013 if (bb->number_of_sux() == 1) {
1014 BlockBegin* sux = bb->sux_at(0);
1015 assert(sux->number_of_preds() > 0, "invalid CFG");
1017 // a block with only one predecessor never has phi functions
1018 if (sux->number_of_preds() > 1) {
1019 int max_phis = cur_state->stack_size() + cur_state->locals_size();
1020 PhiResolver resolver(this, _virtual_register_number + max_phis * 2);
1022 ValueStack* sux_state = sux->state();
1023 Value sux_value;
1024 int index;
1026 assert(cur_state->scope() == sux_state->scope(), "not matching");
1027 assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
1028 assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");
1030 for_each_stack_value(sux_state, index, sux_value) {
1031 move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
1032 }
1034 for_each_local_value(sux_state, index, sux_value) {
1035 move_to_phi(&resolver, cur_state->local_at(index), sux_value);
1036 }
1038 assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
1039 }
1040 }
1041 }
1044 LIR_Opr LIRGenerator::new_register(BasicType type) {
1045 int vreg = _virtual_register_number;
1046 // add a little fudge factor for the bailout, since the bailout is
1047 // only checked periodically. This gives a few extra registers to
1048 // hand out before we really run out, which helps us keep from
1049 // tripping over assertions.
1050 if (vreg + 20 >= LIR_OprDesc::vreg_max) {
1051 bailout("out of virtual registers");
1052 if (vreg + 2 >= LIR_OprDesc::vreg_max) {
1053 // wrap it around
1054 _virtual_register_number = LIR_OprDesc::vreg_base;
1055 }
1056 }
1057 _virtual_register_number += 1;
1058 return LIR_OprFact::virtual_register(vreg, type);
1059 }
1062 // Try to lock using register in hint
1063 LIR_Opr LIRGenerator::rlock(Value instr) {
1064 return new_register(instr->type());
1065 }
1068 // does an rlock and sets result
1069 LIR_Opr LIRGenerator::rlock_result(Value x) {
1070 LIR_Opr reg = rlock(x);
1071 set_result(x, reg);
1072 return reg;
1073 }
1076 // does an rlock and sets result
1077 LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
1078 LIR_Opr reg;
1079 switch (type) {
1080 case T_BYTE:
1081 case T_BOOLEAN:
1082 reg = rlock_byte(type);
1083 break;
1084 default:
1085 reg = rlock(x);
1086 break;
1087 }
1089 set_result(x, reg);
1090 return reg;
1091 }
1094 //---------------------------------------------------------------------
1095 ciObject* LIRGenerator::get_jobject_constant(Value value) {
1096 ObjectType* oc = value->type()->as_ObjectType();
1097 if (oc) {
1098 return oc->constant_value();
1099 }
1100 return NULL;
1101 }
1104 void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
1105 assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
1106 assert(block()->next() == x, "ExceptionObject must be first instruction of block");
1108 // no moves are created for phi functions at the beginning of exception
1109 // handlers, so assign operands manually here
1110 for_each_phi_fun(block(), phi,
1111 operand_for_instruction(phi));
1113 LIR_Opr thread_reg = getThreadPointer();
1114 __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
1115 exceptionOopOpr());
1116 __ move_wide(LIR_OprFact::oopConst(NULL),
1117 new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
1118 __ move_wide(LIR_OprFact::oopConst(NULL),
1119 new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));
1121 LIR_Opr result = new_register(T_OBJECT);
1122 __ move(exceptionOopOpr(), result);
1123 set_result(x, result);
1124 }
1127 //----------------------------------------------------------------------
1128 //----------------------------------------------------------------------
1129 //----------------------------------------------------------------------
1130 //----------------------------------------------------------------------
1131 // visitor functions
1132 //----------------------------------------------------------------------
1133 //----------------------------------------------------------------------
1134 //----------------------------------------------------------------------
1135 //----------------------------------------------------------------------
1137 void LIRGenerator::do_Phi(Phi* x) {
1138 // phi functions are never visited directly
1139 ShouldNotReachHere();
1140 }
1143 // Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
1144 void LIRGenerator::do_Constant(Constant* x) {
1145 if (x->state_before() != NULL) {
1146 // Any constant with a ValueStack requires patching so emit the patch here
1147 LIR_Opr reg = rlock_result(x);
1148 CodeEmitInfo* info = state_for(x, x->state_before());
1149 __ oop2reg_patch(NULL, reg, info);
1150 } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
1151 if (!x->is_pinned()) {
1152 // unpinned constants are handled specially so that they can be
1153 // put into registers when they are used multiple times within a
1154 // block. After the block completes their operand will be
1155 // cleared so that other blocks can't refer to that register.
1156 set_result(x, load_constant(x));
1157 } else {
1158 LIR_Opr res = x->operand();
1159 if (!res->is_valid()) {
1160 res = LIR_OprFact::value_type(x->type());
1161 }
1162 if (res->is_constant()) {
1163 LIR_Opr reg = rlock_result(x);
1164 __ move(res, reg);
1165 } else {
1166 set_result(x, res);
1167 }
1168 }
1169 } else {
1170 set_result(x, LIR_OprFact::value_type(x->type()));
1171 }
1172 }
1175 void LIRGenerator::do_Local(Local* x) {
1176 // operand_for_instruction has the side effect of setting the result
1177 // so there's no need to do it here.
1178 operand_for_instruction(x);
1179 }
1182 void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
1183 Unimplemented();
1184 }
1187 void LIRGenerator::do_Return(Return* x) {
1188 if (compilation()->env()->dtrace_method_probes()) {
1189 BasicTypeList signature;
1190 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
1191 signature.append(T_OBJECT); // methodOop
1192 LIR_OprList* args = new LIR_OprList();
1193 args->append(getThreadPointer());
1194 LIR_Opr meth = new_register(T_OBJECT);
1195 __ oop2reg(method()->constant_encoding(), meth);
1196 args->append(meth);
1197 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
1198 }
1200 if (x->type()->is_void()) {
1201 __ return_op(LIR_OprFact::illegalOpr);
1202 } else {
1203 LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
1204 LIRItem result(x->result(), this);
1206 result.load_item_force(reg);
1207 __ return_op(result.result());
1208 }
1209 set_no_result(x);
1210 }
1212 // Example: ref.get()
1213 // Combination of LoadField and G1 pre-write barrier
1214 void LIRGenerator::do_Reference_get(Intrinsic* x) {
1216 const int referent_offset = java_lang_ref_Reference::referent_offset;
1217 guarantee(referent_offset > 0, "referent offset not initialized");
1219 assert(x->number_of_arguments() == 1, "wrong type");
1221 LIRItem reference(x->argument_at(0), this);
1222 reference.load_item();
1224 // need to perform the null check on the reference object
1225 CodeEmitInfo* info = NULL;
1226 if (x->needs_null_check()) {
1227 info = state_for(x);
1228 }
1230 LIR_Address* referent_field_adr =
1231 new LIR_Address(reference.result(), referent_offset, T_OBJECT);
1233 LIR_Opr result = rlock_result(x);
1235 __ load(referent_field_adr, result, info);
1237 // Register the value in the referent field with the pre-barrier
1238 pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
1239 result /* pre_val */,
1240 false /* do_load */,
1241 false /* patch */,
1242 NULL /* info */);
1243 }
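// Illustrative note, not part of this changeset: the loop shape from the bug
// report that motivates always intrinsifying Reference.get() in C1 (and the
// membar added in C2). In Java source:
//   while (ref.get() != null) {
//     // without the barrier the JIT could hoist the referent load out of
//     // the loop and never observe the GC clearing it -> infinite loop
//   }
// The pre-barrier above also reports the loaded referent to the SATB queue,
// so an object made strongly reachable via get() survives concurrent marking.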
1245 // Example: clazz.isInstance(object)
1246 void LIRGenerator::do_isInstance(Intrinsic* x) {
1247 assert(x->number_of_arguments() == 2, "wrong type");
1249 // TODO could try to substitute this node with an equivalent InstanceOf
1250 // if clazz is known to be a constant Class. This will pick up newly found
1251 // constants after HIR construction. I'll leave this to a future change.
1253 // as a first cut, make a simple leaf call to runtime to stay platform independent.
1254 // could follow the aastore example in a future change.
1256 LIRItem clazz(x->argument_at(0), this);
1257 LIRItem object(x->argument_at(1), this);
1258 clazz.load_item();
1259 object.load_item();
1260 LIR_Opr result = rlock_result(x);
1262 // need to perform null check on clazz
1263 if (x->needs_null_check()) {
1264 CodeEmitInfo* info = state_for(x);
1265 __ null_check(clazz.result(), info);
1266 }
1268 LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
1269 CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
1270 x->type(),
1271 NULL); // NULL CodeEmitInfo results in a leaf call
1272 __ move(call_result, result);
1273 }
1275 // Example: object.getClass()
1276 void LIRGenerator::do_getClass(Intrinsic* x) {
1277 assert(x->number_of_arguments() == 1, "wrong type");
1279 LIRItem rcvr(x->argument_at(0), this);
1280 rcvr.load_item();
1281 LIR_Opr result = rlock_result(x);
1283 // need to perform the null check on the rcvr
1284 CodeEmitInfo* info = NULL;
1285 if (x->needs_null_check()) {
1286 info = state_for(x);
1287 }
1288 __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
1289 __ move_wide(new LIR_Address(result, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
1290 }
1293 // Example: Thread.currentThread()
1294 void LIRGenerator::do_currentThread(Intrinsic* x) {
1295 assert(x->number_of_arguments() == 0, "wrong type");
1296 LIR_Opr reg = rlock_result(x);
1297 __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
1298 }
1301 void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
1302 assert(x->number_of_arguments() == 1, "wrong type");
1303 LIRItem receiver(x->argument_at(0), this);
1305 receiver.load_item();
1306 BasicTypeList signature;
1307 signature.append(T_OBJECT); // receiver
1308 LIR_OprList* args = new LIR_OprList();
1309 args->append(receiver.result());
1310 CodeEmitInfo* info = state_for(x, x->state());
1311 call_runtime(&signature, args,
1312 CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
1313 voidType, info);
1315 set_no_result(x);
1316 }
1319 //------------------------local access--------------------------------------
1321 LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
1322 if (x->operand()->is_illegal()) {
1323 Constant* c = x->as_Constant();
1324 if (c != NULL) {
1325 x->set_operand(LIR_OprFact::value_type(c->type()));
1326 } else {
1327 assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
1328 // allocate a virtual register for this local or phi
1329 x->set_operand(rlock(x));
1330 _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
1331 }
1332 }
1333 return x->operand();
1334 }
1337 Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
1338 if (opr->is_virtual()) {
1339 return instruction_for_vreg(opr->vreg_number());
1340 }
1341 return NULL;
1342 }
1345 Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
1346 if (reg_num < _instruction_for_operand.length()) {
1347 return _instruction_for_operand.at(reg_num);
1348 }
1349 return NULL;
1350 }
1353 void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
1354 if (_vreg_flags.size_in_bits() == 0) {
1355 BitMap2D temp(100, num_vreg_flags);
1356 temp.clear();
1357 _vreg_flags = temp;
1358 }
1359 _vreg_flags.at_put_grow(vreg_num, f, true);
1360 }
1362 bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
1363 if (!_vreg_flags.is_valid_index(vreg_num, f)) {
1364 return false;
1365 }
1366 return _vreg_flags.at(vreg_num, f);
1367 }
1370 // Block local constant handling. This code is useful for keeping
1371 // unpinned constants and constants which aren't exposed in the IR in
1372 // registers. Unpinned Constant instructions have their operands
1373 // cleared when the block is finished so that other blocks can't end
1374 // up referring to their registers.
1376 LIR_Opr LIRGenerator::load_constant(Constant* x) {
1377 assert(!x->is_pinned(), "only for unpinned constants");
1378 _unpinned_constants.append(x);
1379 return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
1380 }
1383 LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
1384 BasicType t = c->type();
1385 for (int i = 0; i < _constants.length(); i++) {
1386 LIR_Const* other = _constants.at(i);
1387 if (t == other->type()) {
1388 switch (t) {
1389 case T_INT:
1390 case T_FLOAT:
1391 if (c->as_jint_bits() != other->as_jint_bits()) continue;
1392 break;
1393 case T_LONG:
1394 case T_DOUBLE:
1395 if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
1396 if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
1397 break;
1398 case T_OBJECT:
1399 if (c->as_jobject() != other->as_jobject()) continue;
1400 break;
1401 }
1402 return _reg_for_constants.at(i);
1403 }
1404 }
1406 LIR_Opr result = new_register(t);
1407 __ move((LIR_Opr)c, result);
1408 _constants.append(c);
1409 _reg_for_constants.append(result);
1410 return result;
1411 }
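// Illustrative sketch, not part of this changeset: repeated loads of one
// constant within a block share a virtual register; block_do_epilog() clears
// the cache so other blocks never see it. The accessor use is hypothetical.
#ifdef ILLUSTRATION_ONLY
static void example_constant_sharing(LIRGenerator* gen) {
  LIR_Const* c = LIR_OprFact::intConst(42)->as_constant_ptr();
  LIR_Opr first  = gen->load_constant(c);
  LIR_Opr second = gen->load_constant(c);  // hits the _constants cache
  assert(first == second, "within a block the register is reused");
}
#endif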
1413 // Various barriers
1415 void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1416 bool do_load, bool patch, CodeEmitInfo* info) {
1417 // Do the pre-write barrier, if any.
1418 switch (_bs->kind()) {
1419 #ifndef SERIALGC
1420 case BarrierSet::G1SATBCT:
1421 case BarrierSet::G1SATBCTLogging:
1422 G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
1423 break;
1424 #endif // SERIALGC
1425 case BarrierSet::CardTableModRef:
1426 case BarrierSet::CardTableExtension:
1427 // No pre barriers
1428 break;
1429 case BarrierSet::ModRef:
1430 case BarrierSet::Other:
1431 // No pre barriers
1432 break;
1433 default :
1434 ShouldNotReachHere();
1436 }
1437 }
1439 void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1440 switch (_bs->kind()) {
1441 #ifndef SERIALGC
1442 case BarrierSet::G1SATBCT:
1443 case BarrierSet::G1SATBCTLogging:
1444 G1SATBCardTableModRef_post_barrier(addr, new_val);
1445 break;
1446 #endif // SERIALGC
1447 case BarrierSet::CardTableModRef:
1448 case BarrierSet::CardTableExtension:
1449 CardTableModRef_post_barrier(addr, new_val);
1450 break;
1451 case BarrierSet::ModRef:
1452 case BarrierSet::Other:
1453 // No post barriers
1454 break;
1455 default :
1456 ShouldNotReachHere();
1457 }
1458 }
1460 ////////////////////////////////////////////////////////////////////////
1461 #ifndef SERIALGC
1463 void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
1464 bool do_load, bool patch, CodeEmitInfo* info) {
1465 // First we test whether marking is in progress.
1466 BasicType flag_type;
1467 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
1468 flag_type = T_INT;
1469 } else {
1470 guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
1471 "Assumption");
1472 flag_type = T_BYTE;
1473 }
1474 LIR_Opr thrd = getThreadPointer();
1475 LIR_Address* mark_active_flag_addr =
1476 new LIR_Address(thrd,
1477 in_bytes(JavaThread::satb_mark_queue_offset() +
1478 PtrQueue::byte_offset_of_active()),
1479 flag_type);
1480 // Read the marking-in-progress flag.
1481 LIR_Opr flag_val = new_register(T_INT);
1482 __ load(mark_active_flag_addr, flag_val);
1483 __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
1485 LIR_PatchCode pre_val_patch_code = lir_patch_none;
1487 CodeStub* slow;
1489 if (do_load) {
1490 assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
1491 assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");
1493 if (patch)
1494 pre_val_patch_code = lir_patch_normal;
1496 pre_val = new_register(T_OBJECT);
1498 if (!addr_opr->is_address()) {
1499 assert(addr_opr->is_register(), "must be");
1500 addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
1501 }
1502 slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
1503 } else {
1504 assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
1505 assert(pre_val->is_register(), "must be");
1506 assert(pre_val->type() == T_OBJECT, "must be an object");
1507 assert(info == NULL, "sanity");
1509 slow = new G1PreBarrierStub(pre_val);
1510 }
1512 __ branch(lir_cond_notEqual, T_INT, slow);
1513 __ branch_destination(slow->continuation());
1514 }
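// Illustrative note, not part of this changeset: the fast path emitted above
// is, in pseudo-code,
//   if (thread->satb_mark_queue().active() != 0)  // concurrent marking?
//     goto G1PreBarrierStub;  // the stub loads *addr (when do_load) and
//                             // enqueues the old value if it is non-NULL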
1516 void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1517 // If the "new_val" is a constant NULL, no barrier is necessary.
1518 if (new_val->is_constant() &&
1519 new_val->as_constant_ptr()->as_jobject() == NULL) return;
1521 if (!new_val->is_register()) {
1522 LIR_Opr new_val_reg = new_register(T_OBJECT);
1523 if (new_val->is_constant()) {
1524 __ move(new_val, new_val_reg);
1525 } else {
1526 __ leal(new_val, new_val_reg);
1527 }
1528 new_val = new_val_reg;
1529 }
1530 assert(new_val->is_register(), "must be a register at this point");
1532 if (addr->is_address()) {
1533 LIR_Address* address = addr->as_address_ptr();
1534 LIR_Opr ptr = new_pointer_register();
1535 if (!address->index()->is_valid() && address->disp() == 0) {
1536 __ move(address->base(), ptr);
1537 } else {
1538 assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1539 __ leal(addr, ptr);
1540 }
1541 addr = ptr;
1542 }
1543 assert(addr->is_register(), "must be a register at this point");
1545 LIR_Opr xor_res = new_pointer_register();
1546 LIR_Opr xor_shift_res = new_pointer_register();
1547 if (TwoOperandLIRForm ) {
1548 __ move(addr, xor_res);
1549 __ logical_xor(xor_res, new_val, xor_res);
1550 __ move(xor_res, xor_shift_res);
1551 __ unsigned_shift_right(xor_shift_res,
1552 LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
1553 xor_shift_res,
1554 LIR_OprDesc::illegalOpr());
1555 } else {
1556 __ logical_xor(addr, new_val, xor_res);
1557 __ unsigned_shift_right(xor_res,
1558 LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
1559 xor_shift_res,
1560 LIR_OprDesc::illegalOpr());
1561 }
1563 if (!new_val->is_register()) {
1564 LIR_Opr new_val_reg = new_register(T_OBJECT);
1565 __ leal(new_val, new_val_reg);
1566 new_val = new_val_reg;
1567 }
1568 assert(new_val->is_register(), "must be a register at this point");
1570 __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
1572 CodeStub* slow = new G1PostBarrierStub(addr, new_val);
1573 __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
1574 __ branch_destination(slow->continuation());
1575 }
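// Illustrative note, not part of this changeset: the filter emitted above is,
// in pseudo-code,
//   if (((addr ^ new_val) >> LogOfHRGrainBytes) != 0)  // cross-region store?
//     goto G1PostBarrierStub;  // the stub also filters NULL values before
//                              // dirtying and enqueueing the card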
1577 #endif // SERIALGC
1578 ////////////////////////////////////////////////////////////////////////
1580 void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1582 assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
1583 LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
1584 if (addr->is_address()) {
1585 LIR_Address* address = addr->as_address_ptr();
1586 // ptr cannot be an object because we use this barrier for array card marks
1587 // and addr can point in the middle of an array.
1588 LIR_Opr ptr = new_pointer_register();
1589 if (!address->index()->is_valid() && address->disp() == 0) {
1590 __ move(address->base(), ptr);
1591 } else {
1592 assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1593 __ leal(addr, ptr);
1594 }
1595 addr = ptr;
1596 }
1597 assert(addr->is_register(), "must be a register at this point");
1599 #ifdef ARM
1600 // TODO: ARM - move to platform-dependent code
1601 LIR_Opr tmp = FrameMap::R14_opr;
1602 if (VM_Version::supports_movw()) {
1603 __ move((LIR_Opr)card_table_base, tmp);
1604 } else {
1605 __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
1606 }
1608 CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
1609 LIR_Address *card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
1610 if(((int)ct->byte_map_base & 0xff) == 0) {
1611 __ move(tmp, card_addr);
1612 } else {
1613 LIR_Opr tmp_zero = new_register(T_INT);
1614 __ move(LIR_OprFact::intConst(0), tmp_zero);
1615 __ move(tmp_zero, card_addr);
1616 }
1617 #else // ARM
1618 LIR_Opr tmp = new_pointer_register();
1619 if (TwoOperandLIRForm) {
1620 __ move(addr, tmp);
1621 __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
1622 } else {
1623 __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
1624 }
1625 if (can_inline_as_constant(card_table_base)) {
1626 __ move(LIR_OprFact::intConst(0),
1627 new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
1628 } else {
1629 __ move(LIR_OprFact::intConst(0),
1630 new LIR_Address(tmp, load_constant(card_table_base),
1631 T_BYTE));
1632 }
1633 #endif // ARM
1634 }
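// Illustrative note, not part of this changeset: on the non-ARM path the
// emitted card mark reduces to
//   card_table_base[addr >> card_shift] = 0;  // dirty the card
// with the base inlined as a constant when possible, else kept in a register.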
1637 //------------------------field access--------------------------------------
1639 // Comment copied from templateTable_i486.cpp
1640 // ----------------------------------------------------------------------------
1641 // Volatile variables demand their effects be made known to all CPUs in
1642 // order. Store buffers on most chips allow reads & writes to reorder; the
1643 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1644 // memory barrier (i.e., it's not sufficient that the interpreter does not
1645 // reorder volatile references, the hardware also must not reorder them).
1646 //
1647 // According to the new Java Memory Model (JMM):
1648 // (1) All volatiles are serialized wrt to each other.
1649 // ALSO reads & writes act as acquire & release, so:
1650 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1651 // the read float up to before the read. It's OK for non-volatile memory refs
1652 // that happen before the volatile read to float down below it.
1653 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1654 // that happen BEFORE the write float down to after the write. It's OK for
1655 // non-volatile memory refs that happen after the volatile write to float up
1656 // before it.
1657 //
1658 // We only put in barriers around volatile refs (they are expensive), not
1659 // _between_ memory refs (that would require us to track the flavor of the
1660 // previous memory refs). Requirements (2) and (3) require some barriers
1661 // before volatile stores and after volatile loads. These nearly cover
1662 // requirement (1) but miss the volatile-store-volatile-load case. This final
1663 // case is placed after volatile-stores although it could just as well go
1664 // before volatile-loads.
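// Concretely, for a volatile field the code below emits on MP systems
// (a sketch of the ordering, not literal LIR):
//
//   membar_release      // before the volatile store, for requirement (3)
//   store  field
//   membar              // after the store, covers volatile-store-volatile-load
//   ...
//   load   field
//   membar_acquire      // after the volatile load, for requirement (2)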
1667 void LIRGenerator::do_StoreField(StoreField* x) {
1668 bool needs_patching = x->needs_patching();
1669 bool is_volatile = x->field()->is_volatile();
1670 BasicType field_type = x->field_type();
1671 bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
1673 CodeEmitInfo* info = NULL;
1674 if (needs_patching) {
1675 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1676 info = state_for(x, x->state_before());
1677 } else if (x->needs_null_check()) {
1678 NullCheck* nc = x->explicit_null_check();
1679 if (nc == NULL) {
1680 info = state_for(x);
1681 } else {
1682 info = state_for(nc);
1683 }
1684 }
1687 LIRItem object(x->obj(), this);
1688 LIRItem value(x->value(), this);
1690 object.load_item();
1692 if (is_volatile || needs_patching) {
1693 // load item if field is volatile (fewer special cases for volatiles)
1694 // load item if field not initialized
1695 // load item if field not constant
1696 // because of code patching we cannot inline constants
1697 if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1698 value.load_byte_item();
1699 } else {
1700 value.load_item();
1701 }
1702 } else {
1703 value.load_for_store(field_type);
1704 }
1706 set_no_result(x);
1708 #ifndef PRODUCT
1709 if (PrintNotLoaded && needs_patching) {
1710 tty->print_cr(" ###class not loaded at store_%s bci %d",
1711 x->is_static() ? "static" : "field", x->printable_bci());
1712 }
1713 #endif
1715 if (x->needs_null_check() &&
1716 (needs_patching ||
1717 MacroAssembler::needs_explicit_null_check(x->offset()))) {
1718 // emit an explicit null check because the offset is too large
1719 __ null_check(object.result(), new CodeEmitInfo(info));
1720 }
1722 LIR_Address* address;
1723 if (needs_patching) {
1724 // we need to patch the offset in the instruction so don't allow
1725 // generate_address to try to be smart about emitting the -1.
1726 // Otherwise the patching code won't know how to find the
1727 // instruction to patch.
1728 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1729 } else {
1730 address = generate_address(object.result(), x->offset(), field_type);
1731 }
1733 if (is_volatile && os::is_MP()) {
1734 __ membar_release();
1735 }
1737 if (is_oop) {
1738 // Do the pre-write barrier, if any.
1739 pre_barrier(LIR_OprFact::address(address),
1740 LIR_OprFact::illegalOpr /* pre_val */,
1741 true /* do_load*/,
1742 needs_patching,
1743 (info ? new CodeEmitInfo(info) : NULL));
1744 }
1746 if (is_volatile && !needs_patching) {
1747 volatile_field_store(value.result(), address, info);
1748 } else {
1749 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1750 __ store(value.result(), address, info, patch_code);
1751 }
1753 if (is_oop) {
1754 // We stored an oop into the object, so mark the card for the object header
1755 post_barrier(object.result(), value.result());
1756 }
1758 if (is_volatile && os::is_MP()) {
1759 __ membar();
1760 }
1761 }
1764 void LIRGenerator::do_LoadField(LoadField* x) {
1765 bool needs_patching = x->needs_patching();
1766 bool is_volatile = x->field()->is_volatile();
1767 BasicType field_type = x->field_type();
1769 CodeEmitInfo* info = NULL;
1770 if (needs_patching) {
1771 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1772 info = state_for(x, x->state_before());
1773 } else if (x->needs_null_check()) {
1774 NullCheck* nc = x->explicit_null_check();
1775 if (nc == NULL) {
1776 info = state_for(x);
1777 } else {
1778 info = state_for(nc);
1779 }
1780 }
1782 LIRItem object(x->obj(), this);
1784 object.load_item();
1786 #ifndef PRODUCT
1787 if (PrintNotLoaded && needs_patching) {
1788 tty->print_cr(" ###class not loaded at load_%s bci %d",
1789 x->is_static() ? "static" : "field", x->printable_bci());
1790 }
1791 #endif
1793 if (x->needs_null_check() &&
1794 (needs_patching ||
1795 MacroAssembler::needs_explicit_null_check(x->offset()))) {
1796 // emit an explicit null check because the offset is too large
1797 __ null_check(object.result(), new CodeEmitInfo(info));
1798 }
1800 LIR_Opr reg = rlock_result(x, field_type);
1801 LIR_Address* address;
1802 if (needs_patching) {
1803 // we need to patch the offset in the instruction so don't allow
1804 // generate_address to try to be smart about emitting the -1.
1805 // Otherwise the patching code won't know how to find the
1806 // instruction to patch.
1807 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1808 } else {
1809 address = generate_address(object.result(), x->offset(), field_type);
1810 }
1812 if (is_volatile && !needs_patching) {
1813 volatile_field_load(address, reg, info);
1814 } else {
1815 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1816 __ load(address, reg, info, patch_code);
1817 }
1819 if (is_volatile && os::is_MP()) {
1820 __ membar_acquire();
1821 }
1822 }
1825 //------------------------java.nio.Buffer.checkIndex------------------------
1827 // int java.nio.Buffer.checkIndex(int)
1828 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1829 // NOTE: by the time we are in checkIndex() we are guaranteed that
1830 // the buffer is non-null (because checkIndex is package-private and
1831 // only called from within other methods in the buffer).
1832 assert(x->number_of_arguments() == 2, "wrong type");
1833 LIRItem buf (x->argument_at(0), this);
1834 LIRItem index(x->argument_at(1), this);
1835 buf.load_item();
1836 index.load_item();
1838 LIR_Opr result = rlock_result(x);
1839 if (GenerateRangeChecks) {
1840 CodeEmitInfo* info = state_for(x);
1841 CodeStub* stub = new RangeCheckStub(info, index.result(), true);
1842 if (index.result()->is_constant()) {
1843 cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1844 __ branch(lir_cond_belowEqual, T_INT, stub);
1845 } else {
1846 cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
1847 java_nio_Buffer::limit_offset(), T_INT, info);
1848 __ branch(lir_cond_aboveEqual, T_INT, stub);
1849 }
1850 __ move(index.result(), result);
1851 } else {
1852 // Just load the index into the result register
1853 __ move(index.result(), result);
1854 }
1855 }
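// In Java-like terms the generated check is roughly (illustrative only):
//
//   if (index >= buf.limit) goto RangeCheckStub;  // slow path raises the exception
//   result = index;
//
// The unsigned above/belowEqual comparisons catch a negative index with the
// same single test, since it wraps to a large unsigned value.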
1858 //------------------------array access--------------------------------------
1861 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
1862 LIRItem array(x->array(), this);
1863 array.load_item();
1864 LIR_Opr reg = rlock_result(x);
1866 CodeEmitInfo* info = NULL;
1867 if (x->needs_null_check()) {
1868 NullCheck* nc = x->explicit_null_check();
1869 if (nc == NULL) {
1870 info = state_for(x);
1871 } else {
1872 info = state_for(nc);
1873 }
1874 }
1875 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
1876 }
1879 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
1880 bool use_length = x->length() != NULL;
1881 LIRItem array(x->array(), this);
1882 LIRItem index(x->index(), this);
1883 LIRItem length(this);
1884 bool needs_range_check = true;
1886 if (use_length) {
1887 needs_range_check = x->compute_needs_range_check();
1888 if (needs_range_check) {
1889 length.set_instruction(x->length());
1890 length.load_item();
1891 }
1892 }
1894 array.load_item();
1895 if (index.is_constant() && can_inline_as_constant(x->index())) {
1896 // let it be a constant
1897 index.dont_load_item();
1898 } else {
1899 index.load_item();
1900 }
1902 CodeEmitInfo* range_check_info = state_for(x);
1903 CodeEmitInfo* null_check_info = NULL;
1904 if (x->needs_null_check()) {
1905 NullCheck* nc = x->explicit_null_check();
1906 if (nc != NULL) {
1907 null_check_info = state_for(nc);
1908 } else {
1909 null_check_info = range_check_info;
1910 }
1911 }
1913 // emit array address setup early so it schedules better
1914 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
1916 if (GenerateRangeChecks && needs_range_check) {
1917 if (use_length) {
1918 // TODO: use a (modified) version of array_range_check that does not require a
1919 // constant length to be loaded to a register
1920 __ cmp(lir_cond_belowEqual, length.result(), index.result());
1921 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1922 } else {
1923 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1924 // The range check performs the null check, so clear it out for the load
1925 null_check_info = NULL;
1926 }
1927 }
1929 __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
1930 }
1933 void LIRGenerator::do_NullCheck(NullCheck* x) {
1934 if (x->can_trap()) {
1935 LIRItem value(x->obj(), this);
1936 value.load_item();
1937 CodeEmitInfo* info = state_for(x);
1938 __ null_check(value.result(), info);
1939 }
1940 }
1943 void LIRGenerator::do_TypeCast(TypeCast* x) {
1944 LIRItem value(x->obj(), this);
1945 value.load_item();
1946 // the result is the same as from the node we are casting
1947 set_result(x, value.result());
1948 }
1951 void LIRGenerator::do_Throw(Throw* x) {
1952 LIRItem exception(x->exception(), this);
1953 exception.load_item();
1954 set_no_result(x);
1955 LIR_Opr exception_opr = exception.result();
1956 CodeEmitInfo* info = state_for(x, x->state());
1958 #ifndef PRODUCT
1959 if (PrintC1Statistics) {
1960 increment_counter(Runtime1::throw_count_address(), T_INT);
1961 }
1962 #endif
1964 // check if the instruction has an xhandler in any of the nested scopes
1965 bool unwind = false;
1966 if (info->exception_handlers()->length() == 0) {
1967 // this throw is not inside an xhandler
1968 unwind = true;
1969 } else {
1970 // get some idea of the throw type
1971 bool type_is_exact = true;
1972 ciType* throw_type = x->exception()->exact_type();
1973 if (throw_type == NULL) {
1974 type_is_exact = false;
1975 throw_type = x->exception()->declared_type();
1976 }
1977 if (throw_type != NULL && throw_type->is_instance_klass()) {
1978 ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
1979 unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
1980 }
1981 }
1983 // do null check before moving exception oop into fixed register
1984 // to avoid a fixed interval with an oop during the null check.
1985 // Use a copy of the CodeEmitInfo because debug information is
1986 // different for null_check and throw.
1987 if (GenerateCompilerNullChecks &&
1988 (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
1989 // if the exception object wasn't created using new then it might be null.
1990 __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
1991 }
1993 if (compilation()->env()->jvmti_can_post_on_exceptions()) {
1994 // we need to go through the exception lookup path to get JVMTI
1995 // notification done
1996 unwind = false;
1997 }
1999 // move exception oop into fixed register
2000 __ move(exception_opr, exceptionOopOpr());
2002 if (unwind) {
2003 __ unwind_exception(exceptionOopOpr());
2004 } else {
2005 __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
2006 }
2007 }
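// Summary of the unwind decision above (a paraphrase, not new behaviour):
// we unwind straight out of the method only when no handler in the nested
// scopes could catch the statically known throw type and JVMTI
// post-on-exceptions is off; otherwise the exception is dispatched through
// throw_exception and the runtime's handler lookup.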
2010 void LIRGenerator::do_RoundFP(RoundFP* x) {
2011 LIRItem input(x->input(), this);
2012 input.load_item();
2013 LIR_Opr input_opr = input.result();
2014 assert(input_opr->is_register(), "why round if value is not in a register?");
2015 assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
2016 if (input_opr->is_single_fpu()) {
2017 set_result(x, round_item(input_opr)); // This code path not currently taken
2018 } else {
2019 LIR_Opr result = new_register(T_DOUBLE);
2020 set_vreg_flag(result, must_start_in_memory);
2021 __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
2022 set_result(x, result);
2023 }
2024 }
2026 void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
2027 LIRItem base(x->base(), this);
2028 LIRItem idx(this);
2030 base.load_item();
2031 if (x->has_index()) {
2032 idx.set_instruction(x->index());
2033 idx.load_nonconstant();
2034 }
2036 LIR_Opr reg = rlock_result(x, x->basic_type());
2038 int log2_scale = 0;
2039 if (x->has_index()) {
2040 assert(x->index()->type()->tag() == intTag, "should not find non-int index");
2041 log2_scale = x->log2_scale();
2042 }
2044 assert(!x->has_index() || idx.value() == x->index(), "should match");
2046 LIR_Opr base_op = base.result();
2047 #ifndef _LP64
2048 if (x->base()->type()->tag() == longTag) {
2049 base_op = new_register(T_INT);
2050 __ convert(Bytecodes::_l2i, base.result(), base_op);
2051 } else {
2052 assert(x->base()->type()->tag() == intTag, "must be");
2053 }
2054 #endif
2056 BasicType dst_type = x->basic_type();
2057 LIR_Opr index_op = idx.result();
2059 LIR_Address* addr;
2060 if (index_op->is_constant()) {
2061 assert(log2_scale == 0, "must not have a scale");
2062 addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2063 } else {
2064 #ifdef X86
2065 #ifdef _LP64
2066 if (!index_op->is_illegal() && index_op->type() == T_INT) {
2067 LIR_Opr tmp = new_pointer_register();
2068 __ convert(Bytecodes::_i2l, index_op, tmp);
2069 index_op = tmp;
2070 }
2071 #endif
2072 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2073 #elif defined(ARM)
2074 addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2075 #else
2076 if (index_op->is_illegal() || log2_scale == 0) {
2077 #ifdef _LP64
2078 if (!index_op->is_illegal() && index_op->type() == T_INT) {
2079 LIR_Opr tmp = new_pointer_register();
2080 __ convert(Bytecodes::_i2l, index_op, tmp);
2081 index_op = tmp;
2082 }
2083 #endif
2084 addr = new LIR_Address(base_op, index_op, dst_type);
2085 } else {
2086 LIR_Opr tmp = new_pointer_register();
2087 __ shift_left(index_op, log2_scale, tmp);
2088 addr = new LIR_Address(base_op, tmp, dst_type);
2089 }
2090 #endif
2091 }
2093 if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2094 __ unaligned_move(addr, reg);
2095 } else {
2096 if (dst_type == T_OBJECT && x->is_wide()) {
2097 __ move_wide(addr, reg);
2098 } else {
2099 __ move(addr, reg);
2100 }
2101 }
2102 }
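// The address formed above is effectively (illustrative):
//
//   base + (index << log2_scale)   // register index
//   base + constant_index          // constant index (scale must be 0)
//
// On LP64 an int index is first widened with i2l so the arithmetic is done
// in 64 bits; platforms without scaled addressing emit the shift explicitly
// into a temporary register.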
2105 void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
2106 int log2_scale = 0;
2107 BasicType type = x->basic_type();
2109 if (x->has_index()) {
2110 assert(x->index()->type()->tag() == intTag, "should not find non-int index");
2111 log2_scale = x->log2_scale();
2112 }
2114 LIRItem base(x->base(), this);
2115 LIRItem value(x->value(), this);
2116 LIRItem idx(this);
2118 base.load_item();
2119 if (x->has_index()) {
2120 idx.set_instruction(x->index());
2121 idx.load_item();
2122 }
2124 if (type == T_BYTE || type == T_BOOLEAN) {
2125 value.load_byte_item();
2126 } else {
2127 value.load_item();
2128 }
2130 set_no_result(x);
2132 LIR_Opr base_op = base.result();
2133 #ifndef _LP64
2134 if (x->base()->type()->tag() == longTag) {
2135 base_op = new_register(T_INT);
2136 __ convert(Bytecodes::_l2i, base.result(), base_op);
2137 } else {
2138 assert(x->base()->type()->tag() == intTag, "must be");
2139 }
2140 #endif
2142 LIR_Opr index_op = idx.result();
2143 if (log2_scale != 0) {
2144 // temporary fix (platform dependent code without shift on Intel would be better)
2145 index_op = new_pointer_register();
2146 #ifdef _LP64
2147 if (idx.result()->type() == T_INT) {
2148 __ convert(Bytecodes::_i2l, idx.result(), index_op);
2149 } else {
2150 #endif
2151 // TODO: ARM also allows embedded shift in the address
2152 __ move(idx.result(), index_op);
2153 #ifdef _LP64
2154 }
2155 #endif
2156 __ shift_left(index_op, log2_scale, index_op);
2157 }
2158 #ifdef _LP64
2159 else if (!index_op->is_illegal() && index_op->type() == T_INT) {
2160 LIR_Opr tmp = new_pointer_register();
2161 __ convert(Bytecodes::_i2l, index_op, tmp);
2162 index_op = tmp;
2163 }
2164 #endif
2166 LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2167 __ move(value.result(), addr);
2168 }
2171 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2172 BasicType type = x->basic_type();
2173 LIRItem src(x->object(), this);
2174 LIRItem off(x->offset(), this);
2176 off.load_item();
2177 src.load_item();
2179 LIR_Opr value = rlock_result(x, x->basic_type());
2181 get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2183 #ifndef SERIALGC
2184 // We might be reading the value of the referent field of a
2185 // Reference object in order to attach it back to the live
2186 // object graph. If G1 is enabled then we need to record
2187 // the value that is being returned in an SATB log buffer.
2188 //
2189 // We need to generate code similar to the following...
2190 //
2191 // if (offset == java_lang_ref_Reference::referent_offset) {
2192 // if (src != NULL) {
2193 // if (klass(src)->reference_type() != REF_NONE) {
2194 // pre_barrier(..., value, ...);
2195 // }
2196 // }
2197 // }
2199 if (UseG1GC && type == T_OBJECT) {
2200 bool gen_pre_barrier = true; // Assume we need to generate pre_barrier.
2201 bool gen_offset_check = true; // Assume we need to generate the offset guard.
2202 bool gen_source_check = true; // Assume we need to check the src object for null.
2203 bool gen_type_check = true; // Assume we need to check the reference_type.
2205 if (off.is_constant()) {
2206 jlong off_con = (off.type()->is_int() ?
2207 (jlong) off.get_jint_constant() :
2208 off.get_jlong_constant());
2211 if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2212 // The constant offset is something other than referent_offset.
2213 // We can skip generating/checking the remaining guards and
2214 // skip generation of the code stub.
2215 gen_pre_barrier = false;
2216 } else {
2217 // The constant offset is the same as referent_offset -
2218 // we do not need to generate a runtime offset check.
2219 gen_offset_check = false;
2220 }
2221 }
2223 // We don't need to generate the stub if the source object is an array
2224 if (gen_pre_barrier && src.type()->is_array()) {
2225 gen_pre_barrier = false;
2226 }
2228 if (gen_pre_barrier) {
2229 // We still need to continue with the checks.
2230 if (src.is_constant()) {
2231 ciObject* src_con = src.get_jobject_constant();
2233 if (src_con->is_null_object()) {
2234 // The constant src object is null - we can skip
2235 // generating the code stub.
2236 gen_pre_barrier = false;
2237 } else {
2238 // Non-null constant source object. We still have to generate
2239 // the slow stub - but we don't need to generate the runtime
2240 // null object check.
2241 gen_source_check = false;
2242 }
2243 }
2244 }
2245 if (gen_pre_barrier && !PatchALot) {
2246 // Can the klass of object be statically determined to be
2247 // a sub-class of Reference?
2248 ciType* type = src.value()->declared_type();
2249 if ((type != NULL) && type->is_loaded()) {
2250 if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
2251 gen_type_check = false;
2252 } else if (type->is_klass() &&
2253 !compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
2254 // Not Reference and not Object klass.
2255 gen_pre_barrier = false;
2256 }
2257 }
2258 }
2260 if (gen_pre_barrier) {
2261 LabelObj* Lcont = new LabelObj();
2263 // We may need to generate more than one runtime check here. Let's start with
2264 // the offset check.
2265 if (gen_offset_check) {
2266 // if (offset != referent_offset) -> continue
2267 // If offset is an int then we can do the comparison with the
2268 // referent_offset constant; otherwise we need to move
2269 // referent_offset into a temporary register and generate
2270 // a reg-reg compare.
2272 LIR_Opr referent_off;
2274 if (off.type()->is_int()) {
2275 referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
2276 } else {
2277 assert(off.type()->is_long(), "what else?");
2278 referent_off = new_register(T_LONG);
2279 __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
2280 }
2281 __ cmp(lir_cond_notEqual, off.result(), referent_off);
2282 __ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
2283 }
2284 if (gen_source_check) {
2285 // offset is a const and equals referent offset
2286 // if (source == null) -> continue
2287 __ cmp(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL));
2288 __ branch(lir_cond_equal, T_OBJECT, Lcont->label());
2289 }
2290 LIR_Opr src_klass = new_register(T_OBJECT);
2291 if (gen_type_check) {
2292 // We have determined that offset == referent_offset && src != null.
2293 // if (src->_klass->_reference_type == REF_NONE) -> continue
2294 __ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), src_klass);
2295 LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(instanceKlass::reference_type_offset()), T_BYTE);
2296 LIR_Opr reference_type = new_register(T_INT);
2297 __ move(reference_type_addr, reference_type);
2298 __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
2299 __ branch(lir_cond_equal, T_INT, Lcont->label());
2300 }
2301 {
2302 // We have determined that src->_klass->_reference_type != REF_NONE
2303 // so register the value in the referent field with the pre-barrier.
2304 pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
2305 value /* pre_val */,
2306 false /* do_load */,
2307 false /* patch */,
2308 NULL /* info */);
2309 }
2310 __ branch_destination(Lcont->label());
2311 }
2312 }
2313 #endif // SERIALGC
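// Note how each guard above is elided when it is statically decidable: a
// constant offset settles the offset check, a constant source object
// settles the null check, and a loaded declared type settles the
// reference-type check (or drops the barrier entirely for arrays and for
// klasses unrelated to Reference).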
2315 if (x->is_volatile() && os::is_MP()) __ membar_acquire();
2316 }
2319 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2320 BasicType type = x->basic_type();
2321 LIRItem src(x->object(), this);
2322 LIRItem off(x->offset(), this);
2323 LIRItem data(x->value(), this);
2325 src.load_item();
2326 if (type == T_BOOLEAN || type == T_BYTE) {
2327 data.load_byte_item();
2328 } else {
2329 data.load_item();
2330 }
2331 off.load_item();
2333 set_no_result(x);
2335 if (x->is_volatile() && os::is_MP()) __ membar_release();
2336 put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
2337 if (x->is_volatile() && os::is_MP()) __ membar();
2338 }
2341 void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
2342 LIRItem src(x->object(), this);
2343 LIRItem off(x->offset(), this);
2345 src.load_item();
2346 if (off.is_constant() && can_inline_as_constant(x->offset())) {
2347 // let it be a constant
2348 off.dont_load_item();
2349 } else {
2350 off.load_item();
2351 }
2353 set_no_result(x);
2355 LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
2356 __ prefetch(addr, is_store);
2357 }
2360 void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
2361 do_UnsafePrefetch(x, false);
2362 }
2365 void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
2366 do_UnsafePrefetch(x, true);
2367 }
2370 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2371 int lng = x->length();
2373 for (int i = 0; i < lng; i++) {
2374 SwitchRange* one_range = x->at(i);
2375 int low_key = one_range->low_key();
2376 int high_key = one_range->high_key();
2377 BlockBegin* dest = one_range->sux();
2378 if (low_key == high_key) {
2379 __ cmp(lir_cond_equal, value, low_key);
2380 __ branch(lir_cond_equal, T_INT, dest);
2381 } else if (high_key - low_key == 1) {
2382 __ cmp(lir_cond_equal, value, low_key);
2383 __ branch(lir_cond_equal, T_INT, dest);
2384 __ cmp(lir_cond_equal, value, high_key);
2385 __ branch(lir_cond_equal, T_INT, dest);
2386 } else {
2387 LabelObj* L = new LabelObj();
2388 __ cmp(lir_cond_less, value, low_key);
2389 __ branch(lir_cond_less, T_INT, L->label());
2390 __ cmp(lir_cond_lessEqual, value, high_key);
2391 __ branch(lir_cond_lessEqual, T_INT, dest);
2392 __ branch_destination(L->label());
2393 }
2394 }
2395 __ jump(default_sux);
2396 }
2399 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2400 SwitchRangeList* res = new SwitchRangeList();
2401 int len = x->length();
2402 if (len > 0) {
2403 BlockBegin* sux = x->sux_at(0);
2404 int key = x->lo_key();
2405 BlockBegin* default_sux = x->default_sux();
2406 SwitchRange* range = new SwitchRange(key, sux);
2407 for (int i = 0; i < len; i++, key++) {
2408 BlockBegin* new_sux = x->sux_at(i);
2409 if (sux == new_sux) {
2410 // still in same range
2411 range->set_high_key(key);
2412 } else {
2413 // skip tests which explicitly dispatch to the default
2414 if (sux != default_sux) {
2415 res->append(range);
2416 }
2417 range = new SwitchRange(key, new_sux);
2418 }
2419 sux = new_sux;
2420 }
2421 if (res->length() == 0 || res->last() != range) res->append(range);
2422 }
2423 return res;
2424 }
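// Example of the merging above (illustrative): a tableswitch with lo_key 0
// and successors { B1, B1, B2, default, B3 } yields the ranges
// [0,1] -> B1, [2,2] -> B2, [4,4] -> B3. Keys dispatching to the default
// successor need no range of their own because do_SwitchRanges ends with an
// unconditional jump to the default.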
2427 // we expect the keys to be sorted by increasing value
2428 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2429 SwitchRangeList* res = new SwitchRangeList();
2430 int len = x->length();
2431 if (len > 0) {
2432 BlockBegin* default_sux = x->default_sux();
2433 int key = x->key_at(0);
2434 BlockBegin* sux = x->sux_at(0);
2435 SwitchRange* range = new SwitchRange(key, sux);
2436 for (int i = 1; i < len; i++) {
2437 int new_key = x->key_at(i);
2438 BlockBegin* new_sux = x->sux_at(i);
2439 if (key+1 == new_key && sux == new_sux) {
2440 // still in same range
2441 range->set_high_key(new_key);
2442 } else {
2443 // skip tests which explicitly dispatch to the default
2444 if (range->sux() != default_sux) {
2445 res->append(range);
2446 }
2447 range = new SwitchRange(new_key, new_sux);
2448 }
2449 key = new_key;
2450 sux = new_sux;
2451 }
2452 if (res->length() == 0 || res->last() != range) res->append(range);
2453 }
2454 return res;
2455 }
2458 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2459 LIRItem tag(x->tag(), this);
2460 tag.load_item();
2461 set_no_result(x);
2463 if (x->is_safepoint()) {
2464 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2465 }
2467 // move values into phi locations
2468 move_to_phi(x->state());
2470 int lo_key = x->lo_key();
2471 int hi_key = x->hi_key();
2472 int len = x->length();
2473 LIR_Opr value = tag.result();
2474 if (UseTableRanges) {
2475 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2476 } else {
2477 for (int i = 0; i < len; i++) {
2478 __ cmp(lir_cond_equal, value, i + lo_key);
2479 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2480 }
2481 __ jump(x->default_sux());
2482 }
2483 }
2486 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2487 LIRItem tag(x->tag(), this);
2488 tag.load_item();
2489 set_no_result(x);
2491 if (x->is_safepoint()) {
2492 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2493 }
2495 // move values into phi locations
2496 move_to_phi(x->state());
2498 LIR_Opr value = tag.result();
2499 if (UseTableRanges) {
2500 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2501 } else {
2502 int len = x->length();
2503 for (int i = 0; i < len; i++) {
2504 __ cmp(lir_cond_equal, value, x->key_at(i));
2505 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2506 }
2507 __ jump(x->default_sux());
2508 }
2509 }
2512 void LIRGenerator::do_Goto(Goto* x) {
2513 set_no_result(x);
2515 if (block()->next()->as_OsrEntry()) {
2516 // need to free up storage used for OSR entry point
2517 LIR_Opr osrBuffer = block()->next()->operand();
2518 BasicTypeList signature;
2519 signature.append(T_INT);
2520 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2521 __ move(osrBuffer, cc->args()->at(0));
2522 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2523 getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2524 }
2526 if (x->is_safepoint()) {
2527 ValueStack* state = x->state_before() ? x->state_before() : x->state();
2529 // increment backedge counter if needed
2530 CodeEmitInfo* info = state_for(x, state);
2531 increment_backedge_counter(info, x->profiled_bci());
2532 CodeEmitInfo* safepoint_info = state_for(x, state);
2533 __ safepoint(safepoint_poll_register(), safepoint_info);
2534 }
2537 // Gotos can be folded Ifs; handle this case.
2537 if (x->should_profile()) {
2538 ciMethod* method = x->profiled_method();
2539 assert(method != NULL, "method should be set if branch is profiled");
2540 ciMethodData* md = method->method_data_or_null();
2541 assert(md != NULL, "Sanity");
2542 ciProfileData* data = md->bci_to_data(x->profiled_bci());
2543 assert(data != NULL, "must have profiling data");
2544 int offset;
2545 if (x->direction() == Goto::taken) {
2546 assert(data->is_BranchData(), "need BranchData for two-way branches");
2547 offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2548 } else if (x->direction() == Goto::not_taken) {
2549 assert(data->is_BranchData(), "need BranchData for two-way branches");
2550 offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2551 } else {
2552 assert(data->is_JumpData(), "need JumpData for branches");
2553 offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2554 }
2555 LIR_Opr md_reg = new_register(T_OBJECT);
2556 __ oop2reg(md->constant_encoding(), md_reg);
2558 increment_counter(new LIR_Address(md_reg, offset,
2559 NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2560 }
2562 // emit phi-instruction move after safepoint since this simplifies
2563 // describing the state at the safepoint.
2564 move_to_phi(x->state());
2566 __ jump(x->default_sux());
2567 }
2570 void LIRGenerator::do_Base(Base* x) {
2571 __ std_entry(LIR_OprFact::illegalOpr);
2572 // Emit moves from physical registers / stack slots to virtual registers
2573 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2574 IRScope* irScope = compilation()->hir()->top_scope();
2575 int java_index = 0;
2576 for (int i = 0; i < args->length(); i++) {
2577 LIR_Opr src = args->at(i);
2578 assert(!src->is_illegal(), "check");
2579 BasicType t = src->type();
2581 // Types which are smaller than int are passed as int, so
2582 // correct the type which is passed.
2583 switch (t) {
2584 case T_BYTE:
2585 case T_BOOLEAN:
2586 case T_SHORT:
2587 case T_CHAR:
2588 t = T_INT;
2589 break;
2590 }
2592 LIR_Opr dest = new_register(t);
2593 __ move(src, dest);
2595 // Assign new location to Local instruction for this local
2596 Local* local = x->state()->local_at(java_index)->as_Local();
2597 assert(local != NULL, "Locals for incoming arguments must have been created");
2598 #ifndef __SOFTFP__
2599 // The java calling convention passes double as long and float as int.
2600 assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2601 #endif // __SOFTFP__
2602 local->set_operand(dest);
2603 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
2604 java_index += type2size[t];
2605 }
2607 if (compilation()->env()->dtrace_method_probes()) {
2608 BasicTypeList signature;
2609 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
2610 signature.append(T_OBJECT); // methodOop
2611 LIR_OprList* args = new LIR_OprList();
2612 args->append(getThreadPointer());
2613 LIR_Opr meth = new_register(T_OBJECT);
2614 __ oop2reg(method()->constant_encoding(), meth);
2615 args->append(meth);
2616 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2617 }
2619 if (method()->is_synchronized()) {
2620 LIR_Opr obj;
2621 if (method()->is_static()) {
2622 obj = new_register(T_OBJECT);
2623 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2624 } else {
2625 Local* receiver = x->state()->local_at(0)->as_Local();
2626 assert(receiver != NULL, "must already exist");
2627 obj = receiver->operand();
2628 }
2629 assert(obj->is_valid(), "must be valid");
2631 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2632 LIR_Opr lock = new_register(T_INT);
2633 __ load_stack_address_monitor(0, lock);
2635 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
2636 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2638 // receiver is guaranteed non-NULL so don't need CodeEmitInfo
2639 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2640 }
2641 }
2643 // increment invocation counters if needed
2644 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2645 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
2646 increment_invocation_counter(info);
2647 }
2649 // all blocks with a successor must end with an unconditional jump
2650 // to the successor even if they are consecutive
2651 __ jump(x->default_sux());
2652 }
2655 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2656 // construct our frame and model the production of incoming pointer
2657 // to the OSR buffer.
2658 __ osr_entry(LIR_Assembler::osrBufferPointer());
2659 LIR_Opr result = rlock_result(x);
2660 __ move(LIR_Assembler::osrBufferPointer(), result);
2661 }
2664 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2665 int i = (x->has_receiver() || x->is_invokedynamic()) ? 1 : 0;
2666 for (; i < args->length(); i++) {
2667 LIRItem* param = args->at(i);
2668 LIR_Opr loc = arg_list->at(i);
2669 if (loc->is_register()) {
2670 param->load_item_force(loc);
2671 } else {
2672 LIR_Address* addr = loc->as_address_ptr();
2673 param->load_for_store(addr->type());
2674 if (addr->type() == T_OBJECT) {
2675 __ move_wide(param->result(), addr);
2676 } else
2677 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2678 __ unaligned_move(param->result(), addr);
2679 } else {
2680 __ move(param->result(), addr);
2681 }
2682 }
2683 }
2685 if (x->has_receiver()) {
2686 LIRItem* receiver = args->at(0);
2687 LIR_Opr loc = arg_list->at(0);
2688 if (loc->is_register()) {
2689 receiver->load_item_force(loc);
2690 } else {
2691 assert(loc->is_address(), "just checking");
2692 receiver->load_for_store(T_OBJECT);
2693 __ move_wide(receiver->result(), loc->as_address_ptr());
2694 }
2695 }
2696 }
2699 // Visits all arguments, returns appropriate items without loading them
2700 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2701 LIRItemList* argument_items = new LIRItemList();
2702 if (x->has_receiver()) {
2703 LIRItem* receiver = new LIRItem(x->receiver(), this);
2704 argument_items->append(receiver);
2705 }
2706 if (x->is_invokedynamic()) {
2707 // Insert a dummy for the synthetic MethodHandle argument.
2708 argument_items->append(NULL);
2709 }
2710 int idx = x->has_receiver() ? 1 : 0;
2711 for (int i = 0; i < x->number_of_arguments(); i++) {
2712 LIRItem* param = new LIRItem(x->argument_at(i), this);
2713 argument_items->append(param);
2714 idx += (param->type()->is_double_word() ? 2 : 1);
2715 }
2716 return argument_items;
2717 }
2720 // The invoke with receiver has the following phases:
2721 // a) traverse and load/lock receiver;
2722 // b) traverse all arguments -> item-array (invoke_visit_arguments)
2723 // c) push receiver on stack
2724 // d) load each of the items and push on stack
2725 // e) unlock receiver
2726 // f) move receiver into receiver-register %o0
2727 // g) lock result registers and emit call operation
2728 //
2729 // Before issuing a call, we must spill-save all values on stack
2730 // that are in caller-save registers. "spill-save" moves those registers
2731 // either in a free callee-save register or spills them if no free
2732 // callee save register is available.
2733 //
2734 // The problem is where to invoke spill-save.
2735 // - if invoked between e) and f), we may lock callee save
2736 // register in "spill-save" that destroys the receiver register
2737 // before f) is executed
2738 // - if we rearrange f) to be earlier, by loading %o0, it
2739 // may destroy a value on the stack that is currently in %o0
2740 // and is waiting to be spilled
2741 // - if we keep the receiver locked while doing spill-save,
2742 // we cannot spill it as it is spill-locked
2743 //
2744 void LIRGenerator::do_Invoke(Invoke* x) {
2745 CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2747 LIR_OprList* arg_list = cc->args();
2748 LIRItemList* args = invoke_visit_arguments(x);
2749 LIR_Opr receiver = LIR_OprFact::illegalOpr;
2751 // setup result register
2752 LIR_Opr result_register = LIR_OprFact::illegalOpr;
2753 if (x->type() != voidType) {
2754 result_register = result_register_for(x->type());
2755 }
2757 CodeEmitInfo* info = state_for(x, x->state());
2759 // invokedynamics can deoptimize.
2760 CodeEmitInfo* deopt_info = x->is_invokedynamic() ? state_for(x, x->state_before()) : NULL;
2762 invoke_load_arguments(x, args, arg_list);
2764 if (x->has_receiver()) {
2765 args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2766 receiver = args->at(0)->result();
2767 }
2769 // emit invoke code
2770 bool optimized = x->target_is_loaded() && x->target_is_final();
2771 assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2773 // JSR 292
2774 // Preserve the SP over MethodHandle call sites.
2775 ciMethod* target = x->target();
2776 bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
2777 target->is_method_handle_intrinsic() ||
2778 target->is_compiled_lambda_form());
2779 if (is_method_handle_invoke) {
2780 info->set_is_method_handle_invoke(true);
2781 __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
2782 }
2784 switch (x->code()) {
2785 case Bytecodes::_invokestatic:
2786 __ call_static(target, result_register,
2787 SharedRuntime::get_resolve_static_call_stub(),
2788 arg_list, info);
2789 break;
2790 case Bytecodes::_invokespecial:
2791 case Bytecodes::_invokevirtual:
2792 case Bytecodes::_invokeinterface:
2793 // for final target we still produce an inline cache, in order
2794 // to be able to call mixed (compiled and interpreted) code
2795 if (x->code() == Bytecodes::_invokespecial || optimized) {
2796 __ call_opt_virtual(target, receiver, result_register,
2797 SharedRuntime::get_resolve_opt_virtual_call_stub(),
2798 arg_list, info);
2799 } else if (x->vtable_index() < 0) {
2800 __ call_icvirtual(target, receiver, result_register,
2801 SharedRuntime::get_resolve_virtual_call_stub(),
2802 arg_list, info);
2803 } else {
2804 int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2805 int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2806 __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2807 }
2808 break;
2809 case Bytecodes::_invokedynamic: {
2810 ciBytecodeStream bcs(x->scope()->method());
2811 bcs.force_bci(x->state()->bci());
2812 assert(bcs.cur_bc() == Bytecodes::_invokedynamic, "wrong stream");
2813 ciCPCache* cpcache = bcs.get_cpcache();
2815 // Get CallSite offset from constant pool cache pointer.
2816 int index = bcs.get_method_index();
2817 size_t call_site_offset = cpcache->get_f1_offset(index);
2819 // Load CallSite object from constant pool cache.
2820 LIR_Opr call_site = new_register(objectType);
2821 __ oop2reg(cpcache->constant_encoding(), call_site);
2822 __ move_wide(new LIR_Address(call_site, call_site_offset, T_OBJECT), call_site);
2824 // If this invokedynamic call site hasn't been executed yet in
2825 // the interpreter, the CallSite object in the constant pool
2826 // cache is still null and we need to deoptimize.
2827 if (cpcache->is_f1_null_at(index)) {
2828 // Only deoptimize if the CallSite object is still null; we don't
2829 // recompile methods in C1 after deoptimization so this call site
2830 // might be resolved the next time we execute it after OSR.
2831 DeoptimizeStub* deopt_stub = new DeoptimizeStub(deopt_info);
2832 __ cmp(lir_cond_equal, call_site, LIR_OprFact::oopConst(NULL));
2833 __ branch(lir_cond_equal, T_OBJECT, deopt_stub);
2834 }
2836 // Use the receiver register for the synthetic MethodHandle
2837 // argument.
2838 receiver = LIR_Assembler::receiverOpr();
2840 // Load target MethodHandle from CallSite object.
2841 __ load(new LIR_Address(call_site, java_lang_invoke_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
2843 __ call_dynamic(target, receiver, result_register,
2844 SharedRuntime::get_resolve_opt_virtual_call_stub(),
2845 arg_list, info);
2846 break;
2847 }
2848 default:
2849 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
2850 break;
2851 }
2853 // JSR 292
2854 // Restore the SP after MethodHandle call sites.
2855 if (is_method_handle_invoke) {
2856 __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
2857 }
2859 if (x->type()->is_float() || x->type()->is_double()) {
2860 // Force rounding of results from non-strictfp when in strictfp
2861 // scope (or when we don't know the strictness of the callee, to
2862 // be safe).
2863 if (method()->is_strict()) {
2864 if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2865 result_register = round_item(result_register);
2866 }
2867 }
2868 }
2870 if (result_register->is_valid()) {
2871 LIR_Opr result = rlock_result(x);
2872 __ move(result_register, result);
2873 }
2874 }
2877 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
2878 assert(x->number_of_arguments() == 1, "wrong type");
2879 LIRItem value (x->argument_at(0), this);
2880 LIR_Opr reg = rlock_result(x);
2881 value.load_item();
2882 LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
2883 __ move(tmp, reg);
2884 }
2888 // Code for: x->x() {x->cond()} x->y() ? x->tval() : x->fval()
2889 void LIRGenerator::do_IfOp(IfOp* x) {
2890 #ifdef ASSERT
2891 {
2892 ValueTag xtag = x->x()->type()->tag();
2893 ValueTag ttag = x->tval()->type()->tag();
2894 assert(xtag == intTag || xtag == objectTag, "cannot handle others");
2895 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
2896 assert(ttag == x->fval()->type()->tag(), "cannot handle others");
2897 }
2898 #endif
2900 LIRItem left(x->x(), this);
2901 LIRItem right(x->y(), this);
2902 left.load_item();
2903 if (can_inline_as_constant(right.value())) {
2904 right.dont_load_item();
2905 } else {
2906 right.load_item();
2907 }
2909 LIRItem t_val(x->tval(), this);
2910 LIRItem f_val(x->fval(), this);
2911 t_val.dont_load_item();
2912 f_val.dont_load_item();
2913 LIR_Opr reg = rlock_result(x);
2915 __ cmp(lir_cond(x->cond()), left.result(), right.result());
2916 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
2917 }
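// For example, an int-typed `z = (a < b) ? p : q` becomes a compare plus a
// conditional move, with no branch (sketch of the LIR shape):
//
//   cmp   less, a, b
//   cmove less, p, q -> z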
2919 void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
2920 assert(x->number_of_arguments() == expected_arguments, "wrong type");
2921 LIR_Opr reg = result_register_for(x->type());
2922 __ call_runtime_leaf(routine, getThreadTemp(),
2923 reg, new LIR_OprList());
2924 LIR_Opr result = rlock_result(x);
2925 __ move(reg, result);
2926 }
2928 #ifdef TRACE_HAVE_INTRINSICS
2929 void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
2930 LIR_Opr thread = getThreadPointer();
2931 LIR_Opr osthread = new_pointer_register();
2932 __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
2933 size_t thread_id_size = OSThread::thread_id_size();
2934 if (thread_id_size == (size_t) BytesPerLong) {
2935 LIR_Opr id = new_register(T_LONG);
2936 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id);
2937 __ convert(Bytecodes::_l2i, id, rlock_result(x));
2938 } else if (thread_id_size == (size_t) BytesPerInt) {
2939 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x));
2940 } else {
2941 ShouldNotReachHere();
2942 }
2943 }
2945 void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
2946 CodeEmitInfo* info = state_for(x);
2947 CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
2948 assert(info != NULL, "must have info");
2949 LIRItem arg(x->argument_at(1), this);
2950 arg.load_item();
2951 LIR_Opr klass = new_register(T_OBJECT);
2952 __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), T_OBJECT), klass, info);
2953 LIR_Opr id = new_register(T_LONG);
2954 ByteSize offset = TRACE_ID_OFFSET;
2955 LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
2956 __ move(trace_id_addr, id);
2957 __ logical_or(id, LIR_OprFact::longConst(0x01l), id);
2958 __ store(id, trace_id_addr);
2959 __ logical_and(id, LIR_OprFact::longConst(~0x3l), id);
2960 __ move(id, rlock_result(x));
2961 }
2962 #endif
2964 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
2965 switch (x->id()) {
2966 case vmIntrinsics::_intBitsToFloat :
2967 case vmIntrinsics::_doubleToRawLongBits :
2968 case vmIntrinsics::_longBitsToDouble :
2969 case vmIntrinsics::_floatToRawIntBits : {
2970 do_FPIntrinsics(x);
2971 break;
2972 }
2974 #ifdef TRACE_HAVE_INTRINSICS
2975 case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break;
2976 case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break;
2977 case vmIntrinsics::_counterTime:
2978 do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), 0, x);
2979 break;
2980 #endif
2982 case vmIntrinsics::_currentTimeMillis:
2983 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), 0, x);
2984 break;
2986 case vmIntrinsics::_nanoTime:
2987 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), 0, x);
2988 break;
2990 case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break;
2991 case vmIntrinsics::_isInstance: do_isInstance(x); break;
2992 case vmIntrinsics::_getClass: do_getClass(x); break;
2993 case vmIntrinsics::_currentThread: do_currentThread(x); break;
2995 case vmIntrinsics::_dlog: // fall through
2996 case vmIntrinsics::_dlog10: // fall through
2997 case vmIntrinsics::_dabs: // fall through
2998 case vmIntrinsics::_dsqrt: // fall through
2999 case vmIntrinsics::_dtan: // fall through
3000 case vmIntrinsics::_dsin : // fall through
3001 case vmIntrinsics::_dcos : // fall through
3002 case vmIntrinsics::_dexp : // fall through
3003 case vmIntrinsics::_dpow : do_MathIntrinsic(x); break;
3004 case vmIntrinsics::_arraycopy: do_ArrayCopy(x); break;
3006 // java.nio.Buffer.checkIndex
3007 case vmIntrinsics::_checkIndex: do_NIOCheckIndex(x); break;
3009 case vmIntrinsics::_compareAndSwapObject:
3010 do_CompareAndSwap(x, objectType);
3011 break;
3012 case vmIntrinsics::_compareAndSwapInt:
3013 do_CompareAndSwap(x, intType);
3014 break;
3015 case vmIntrinsics::_compareAndSwapLong:
3016 do_CompareAndSwap(x, longType);
3017 break;
3019 case vmIntrinsics::_Reference_get:
3020 do_Reference_get(x);
3021 break;
3023 default: ShouldNotReachHere(); break;
3024 }
3025 }
3027 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
3028 // Need recv in a temporary register so it interferes with the other temporaries
3029 LIR_Opr recv = LIR_OprFact::illegalOpr;
3030 LIR_Opr mdo = new_register(T_OBJECT);
3031 // tmp is used to hold the counters on SPARC
3032 LIR_Opr tmp = new_pointer_register();
3033 if (x->recv() != NULL) {
3034 LIRItem value(x->recv(), this);
3035 value.load_item();
3036 recv = new_register(T_OBJECT);
3037 __ move(value.result(), recv);
3038 }
3039 __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3040 }
3042 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3043 // We can safely ignore accessors here, since c2 will inline them anyway;
3044 // accessors are also always mature.
3045 if (!x->inlinee()->is_accessor()) {
3046 CodeEmitInfo* info = state_for(x, x->state(), true);
3047 // Notify the runtime very infrequently only to take care of counter overflows
3048 increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true);
3049 }
3050 }
3052 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
3053 int freq_log;
3054 int level = compilation()->env()->comp_level();
3055 if (level == CompLevel_limited_profile) {
3056 freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3057 } else if (level == CompLevel_full_profile) {
3058 freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3059 } else {
3060 ShouldNotReachHere();
3061 }
3062 // Increment the appropriate invocation/backedge counter and notify the runtime.
3063 increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
3064 }
3066 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3067 ciMethod *method, int frequency,
3068 int bci, bool backedge, bool notify) {
3069 assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^x - 1 or 0");
3070 int level = _compilation->env()->comp_level();
3071 assert(level > CompLevel_simple, "Shouldn't be here");
3073 int offset = -1;
3074 LIR_Opr counter_holder = new_register(T_OBJECT);
3075 LIR_Opr meth;
3076 if (level == CompLevel_limited_profile) {
3077 offset = in_bytes(backedge ? methodOopDesc::backedge_counter_offset() :
3078 methodOopDesc::invocation_counter_offset());
3079 __ oop2reg(method->constant_encoding(), counter_holder);
3080 meth = counter_holder;
3081 } else if (level == CompLevel_full_profile) {
3082 offset = in_bytes(backedge ? methodDataOopDesc::backedge_counter_offset() :
3083 methodDataOopDesc::invocation_counter_offset());
3084 ciMethodData* md = method->method_data_or_null();
3085 assert(md != NULL, "Sanity");
3086 __ oop2reg(md->constant_encoding(), counter_holder);
3087 meth = new_register(T_OBJECT);
3088 __ oop2reg(method->constant_encoding(), meth);
3089 } else {
3090 ShouldNotReachHere();
3091 }
3092 LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3093 LIR_Opr result = new_register(T_INT);
3094 __ load(counter, result);
3095 __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
3096 __ store(result, counter);
3097 if (notify) {
3098 LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
3099 __ logical_and(result, mask, result);
3100 __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3101 // The bci for info can point to the cmp for ifs; we want the if bci
3102 CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3103 __ branch(lir_cond_equal, T_INT, overflow);
3104 __ branch_destination(overflow->continuation());
3105 }
3106 }
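// Since frequency is 2^n - 1, the masked test above succeeds once every
// frequency + 1 increments. For example, with a freq_log of 10 the mask
// covers count bits 0..9, so the CounterOverflowStub (and hence the
// runtime) is entered roughly every 1024th invocation or backedge.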
3108 void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3109 LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3110 BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3112 if (x->pass_thread()) {
3113 signature->append(T_ADDRESS);
3114 args->append(getThreadPointer());
3115 }
3117 for (int i = 0; i < x->number_of_arguments(); i++) {
3118 Value a = x->argument_at(i);
3119 LIRItem* item = new LIRItem(a, this);
3120 item->load_item();
3121 args->append(item->result());
3122 signature->append(as_BasicType(a->type()));
3123 }
3125 LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
3126 if (x->type() == voidType) {
3127 set_no_result(x);
3128 } else {
3129 __ move(result, rlock_result(x));
3130 }
3131 }
3133 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
3134 LIRItemList args(1);
3135 LIRItem value(arg1, this);
3136 args.append(&value);
3137 BasicTypeList signature;
3138 signature.append(as_BasicType(arg1->type()));
3140 return call_runtime(&signature, &args, entry, result_type, info);
3141 }
3144 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
3145 LIRItemList args(2);
3146 LIRItem value1(arg1, this);
3147 LIRItem value2(arg2, this);
3148 args.append(&value1);
3149 args.append(&value2);
3150 BasicTypeList signature;
3151 signature.append(as_BasicType(arg1->type()));
3152 signature.append(as_BasicType(arg2->type()));
3154 return call_runtime(&signature, &args, entry, result_type, info);
3155 }
3158 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
3159 address entry, ValueType* result_type, CodeEmitInfo* info) {
3160 // get a result register
3161 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3162 LIR_Opr result = LIR_OprFact::illegalOpr;
3163 if (result_type->tag() != voidTag) {
3164 result = new_register(result_type);
3165 phys_reg = result_register_for(result_type);
3166 }
3168 // move the arguments into the correct location
3169 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3170 assert(cc->length() == args->length(), "argument mismatch");
3171 for (int i = 0; i < args->length(); i++) {
3172 LIR_Opr arg = args->at(i);
3173 LIR_Opr loc = cc->at(i);
3174 if (loc->is_register()) {
3175 __ move(arg, loc);
3176 } else {
3177 LIR_Address* addr = loc->as_address_ptr();
3178 // if (!can_store_as_constant(arg)) {
3179 // LIR_Opr tmp = new_register(arg->type());
3180 // __ move(arg, tmp);
3181 // arg = tmp;
3182 // }
3183 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3184 __ unaligned_move(arg, addr);
3185 } else {
3186 __ move(arg, addr);
3187 }
3188 }
3189 }
3191 if (info) {
3192 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3193 } else {
3194 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3195 }
3196 if (result->is_valid()) {
3197 __ move(phys_reg, result);
3198 }
3199 return result;
3200 }
3203 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
3204 address entry, ValueType* result_type, CodeEmitInfo* info) {
3205 // get a result register
3206 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3207 LIR_Opr result = LIR_OprFact::illegalOpr;
3208 if (result_type->tag() != voidTag) {
3209 result = new_register(result_type);
3210 phys_reg = result_register_for(result_type);
3211 }
3213 // move the arguments into the correct location
3214 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3216 assert(cc->length() == args->length(), "argument mismatch");
3217 for (int i = 0; i < args->length(); i++) {
3218 LIRItem* arg = args->at(i);
3219 LIR_Opr loc = cc->at(i);
3220 if (loc->is_register()) {
3221 arg->load_item_force(loc);
3222 } else {
3223 LIR_Address* addr = loc->as_address_ptr();
3224 arg->load_for_store(addr->type());
3225 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3226 __ unaligned_move(arg->result(), addr);
3227 } else {
3228 __ move(arg->result(), addr);
3229 }
3230 }
3231 }
3233 if (info) {
3234 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3235 } else {
3236 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3237 }
3238 if (result->is_valid()) {
3239 __ move(phys_reg, result);
3240 }
3241 return result;
3242 }
3244 void LIRGenerator::do_MemBar(MemBar* x) {
3245 if (os::is_MP()) {
3246 LIR_Code code = x->code();
3247 switch(code) {
3248 case lir_membar_acquire : __ membar_acquire(); break;
3249 case lir_membar_release : __ membar_release(); break;
3250 case lir_membar : __ membar(); break;
3251 case lir_membar_loadload : __ membar_loadload(); break;
3252 case lir_membar_storestore: __ membar_storestore(); break;
3253 case lir_membar_loadstore : __ membar_loadstore(); break;
3254 case lir_membar_storeload : __ membar_storeload(); break;
3255 default : ShouldNotReachHere(); break;
3256 }
3257 }
3258 }