Wed, 23 Jan 2013 13:02:39 -0500
8005915: Unify SERIALGC and INCLUDE_ALTERNATE_GCS
Summary: Rename INCLUDE_ALTERNATE_GCS to INCLUDE_ALL_GCS and replace SERIALGC with INCLUDE_ALL_GCS.
Reviewed-by: coleenp, stefank
/*
 * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// TODO: ARM - Use some recognizable constant which still fits architectural constraints
#ifdef ARM
#define PATCHED_ADDR  (204)
#else
#define PATCHED_ADDR  (max_jint)
#endif
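
// PATCHED_ADDR is the placeholder displacement for field accesses whose
// offset is patched at runtime.  On non-ARM platforms it is max_jint, which
// the barrier code below relies on when asserting that leal never sees a
// patched address.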

void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}


//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3

PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}


// Traverse assignment graph in depth first order and generate moves in post order
// i.e., two assignments: b := c, a := b start with node c:
// Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
// Generates moves in this order: move b to a and move c to b
// i.e., cycle a := b, b := a start with node a
// Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
// Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}


PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations;
  // these can never be part of a cycle because only virtual registers
  // appear as move destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}


ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}


//--------------------------------------------------------------


void LIRGenerator::init() {
  _bs = Universe::heap()->barrier_set();
}


void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}


void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}


void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}


//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be root;
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}


// This is called for each node in tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop walk when we encounter a root
  if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != NULL, "state must be defined");
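
  // Walk every value live in this state (and in caller states, for inlined
  // scopes) so that each has a valid operand before debug info is recorded.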
  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers());
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}


void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info) {
  if (!obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ klass2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ metadata2reg(obj->constant_encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
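  // The unsigned comparison treats a negative index as a huge positive
  // value, so one branch covers both index < 0 and index >= length.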
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
}


void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
  __ move(index, result);
}


void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          int c = right->as_jint();
          if (is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        if (is_strictfp) {
          __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div (left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
  case Bytecodes::_ishl:
  case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
  case Bytecodes::_ishr:
  case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
  case Bytecodes::_iushr:
  case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
  default: ShouldNotReachHere();
  }
}


void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}


void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  klass2reg_with_patching(klass_reg, klass, info);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

static Value maxvalue(IfOp* ifop) {
  switch (ifop->cond()) {
    case If::eql: return NULL;
    case If::neq: return NULL;
    case If::lss: // x <  y ? x : y
    case If::leq: // x <= y ? x : y
      if (ifop->x() == ifop->tval() &&
          ifop->y() == ifop->fval()) return ifop->y();
      return NULL;

    case If::gtr: // x >  y ? y : x
    case If::geq: // x >= y ? y : x
      if (ifop->x() == ifop->tval() &&
          ifop->y() == ifop->fval()) return ifop->y();
      return NULL;

  }
}

static ciType* phi_declared_type(Phi* phi) {
  ciType* t = phi->operand_at(0)->declared_type();
  if (t == NULL) {
    return NULL;
  }
  for(int i = 1; i < phi->operand_count(); i++) {
    if (t != phi->operand_at(i)->declared_type()) {
      return NULL;
    }
  }
  return t;
}

void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }

    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != NULL) {
    Value length_limit = NULL;

    IfOp* ifop = length->as_IfOp();
    if (ifop != NULL) {
      // look for expressions like min(v, a.length) which ends up as
      // x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }

    // try to skip null checks and range checks
    NewArray* src_array = src->as_NewArray();
    if (src_array != NULL) {
      flags &= ~LIR_OpArrayCopy::src_null_check;
      if (length_limit != NULL &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }

    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != NULL) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != NULL &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    if (src_int->value() >= dst_int->value()) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != NULL) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    // src and dest positions are the same, or dst is zero so assume
    // nonoverlapping copy.
    flags &= ~LIR_OpArrayCopy::overlapping;
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}


LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()],
         err_msg_res("size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type())));
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}

void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);

    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}

// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated with Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double..) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of expression
// stack is passed in registers. All other values are stored in spilling
// area. Every Phi has an index which designates its spill slot.
// At exit of a basic block, we fill the register(s) and spill slots.
// At entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.


// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    LIR_Opr operand = cur_val->operand();
    if (cur_val->operand()->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}


LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically.  This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  return LIR_OprFact::virtual_register(vreg, type);
}


// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }

  set_result(x, reg);
  return reg;
}


//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the begin of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}


//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block.  After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}


void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    signature.append(T_OBJECT); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}

// Example: ref.get()
// Combination of LoadField and g1 pre-write barrier
void LIRGenerator::do_Reference_get(Intrinsic* x) {

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Address* referent_field_adr =
    new LIR_Address(reference.result(), referent_offset, T_OBJECT);

  LIR_Opr result = rlock_result(x);

  __ load(referent_field_adr, result, info);
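
  // With a SATB collector the loaded referent must be treated as live even
  // if the Reference itself is no longer reachable, hence the pre-barrier
  // applied to the loaded value below.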
  // Register the value in the referent field with the pre-barrier
  pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
              result /* pre_val */,
              false  /* do_load */,
              false  /* patch */,
              NULL   /* info */);
}

// Example: clazz.isInstance(object)
void LIRGenerator::do_isInstance(Intrinsic* x) {
  assert(x->number_of_arguments() == 2, "wrong type");

  // TODO could try to substitute this node with an equivalent InstanceOf
  // if clazz is known to be a constant Class. This will pick up newly found
  // constants after HIR construction. I'll leave this to a future change.

  // as a first cut, make a simple leaf call to runtime to stay platform independent.
  // could follow the aastore example in a future change.

  LIRItem clazz(x->argument_at(0), this);
  LIRItem object(x->argument_at(1), this);
  clazz.load_item();
  object.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform null check on clazz
  if (x->needs_null_check()) {
    CodeEmitInfo* info = state_for(x);
    __ null_check(clazz.result(), info);
  }

  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
                                     x->type(),
                                     NULL); // NULL CodeEmitInfo results in a leaf call
  __ move(call_result, result);
}

// Example: object.getClass ()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }
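  // Load the Klass* out of the object header, then the java.lang.Class
  // mirror out of the Klass; the mirror is the value getClass() returns.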
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), result, info);
  __ move_wide(new LIR_Address(result, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
}


// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}


void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}


//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}


Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}


void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
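  // The flag bitmap is allocated lazily, on the first flag that is set.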
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}


// Block local constant handling.  This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers.  Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
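  // Reuse the register of an equal constant that was already materialized
  // in this block instead of loading the constant again.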
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}

// Various barriers

void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                               bool do_load, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      // No pre barriers
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No pre barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_post_barrier(addr, new_val);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr, new_val);
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No post barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                                                     bool do_load, bool patch, CodeEmitInfo* info) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    flag_type = T_BYTE;
  }
  LIR_Opr thrd = getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(JavaThread::satb_mark_queue_offset() +
                             PtrQueue::byte_offset_of_active()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == NULL, "sanity");

    slow = new G1PreBarrierStub(pre_val);
  }

  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}

void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  // If the "new_val" is a constant NULL, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == NULL) return;

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");
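
  // A post barrier is needed only when the store crosses heap regions:
  // XOR the field address with the new value and shift out the low
  // LogOfHRGrainBytes bits; the result is zero exactly when both addresses
  // lie in the same region.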
  LIR_Opr xor_res = new_pointer_register();
  LIR_Opr xor_shift_res = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, xor_res);
    __ logical_xor(xor_res, new_val, xor_res);
    __ move(xor_res, xor_shift_res);
    __ unsigned_shift_right(xor_shift_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  } else {
    __ logical_xor(addr, new_val, xor_res);
    __ unsigned_shift_right(xor_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  }

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    __ leal(new_val, new_val_reg);
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));

  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
  __ branch_destination(slow->continuation());
}

#endif // INCLUDE_ALL_GCS
////////////////////////////////////////////////////////////////////////

void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {

  assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    // ptr cannot be an object because we use this barrier for array card marks
    // and addr can point in the middle of an array.
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");
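
  // Dirty the card covering addr: index the card table with
  // (addr >> card_shift) and store the dirty value (0) into that byte.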
#ifdef ARM
  // TODO: ARM - move to platform-dependent code
  LIR_Opr tmp = FrameMap::R14_opr;
  if (VM_Version::supports_movw()) {
    __ move((LIR_Opr)card_table_base, tmp);
  } else {
    __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
  }

  CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
  LIR_Address *card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
  if(((int)ct->byte_map_base & 0xff) == 0) {
    __ move(tmp, card_addr);
  } else {
    LIR_Opr tmp_zero = new_register(T_INT);
    __ move(LIR_OprFact::intConst(0), tmp_zero);
    __ move(tmp_zero, card_addr);
  }
#else // ARM
  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  }
  if (can_inline_as_constant(card_table_base)) {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
  } else {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, load_constant(card_table_base),
                            T_BYTE));
  }
#endif // ARM
}
1639 //------------------------field access--------------------------------------
1641 // Comment copied form templateTable_i486.cpp
1642 // ----------------------------------------------------------------------------
1643 // Volatile variables demand their effects be made known to all CPU's in
1644 // order. Store buffers on most chips allow reads & writes to reorder; the
1645 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1646 // memory barrier (i.e., it's not sufficient that the interpreter does not
1647 // reorder volatile references, the hardware also must not reorder them).
1648 //
1649 // According to the new Java Memory Model (JMM):
1650 // (1) All volatiles are serialized wrt to each other.
1651 // ALSO reads & writes act as aquire & release, so:
1652 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1653 // the read float up to before the read. It's OK for non-volatile memory refs
1654 // that happen before the volatile read to float down below it.
1655 // (3) Similar a volatile write cannot let unrelated NON-volatile memory refs
1656 // that happen BEFORE the write float down to after the write. It's OK for
1657 // non-volatile memory refs that happen after the volatile write to float up
1658 // before it.
1659 //
1660 // We only put in barriers around volatile refs (they are expensive), not
1661 // _between_ memory refs (that would require us to track the flavor of the
1662 // previous memory refs). Requirements (2) and (3) require some barriers
1663 // before volatile stores and after volatile loads. These nearly cover
1664 // requirement (1) but miss the volatile-store-volatile-load case. This final
1665 // case is placed after volatile-stores although it could just as well go
1666 // before volatile-loads.
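//
// In LIR terms the policy above comes out as (a sketch, MP systems only):
//   volatile store:  membar_release; store; membar        // (3), plus the store-load case of (1)
//   volatile load:   load; membar_acquire                 // (2)
// which is what do_StoreField and do_LoadField emit below.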
1669 void LIRGenerator::do_StoreField(StoreField* x) {
1670 bool needs_patching = x->needs_patching();
1671 bool is_volatile = x->field()->is_volatile();
1672 BasicType field_type = x->field_type();
1673 bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
1675 CodeEmitInfo* info = NULL;
1676 if (needs_patching) {
1677 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1678 info = state_for(x, x->state_before());
1679 } else if (x->needs_null_check()) {
1680 NullCheck* nc = x->explicit_null_check();
1681 if (nc == NULL) {
1682 info = state_for(x);
1683 } else {
1684 info = state_for(nc);
1685 }
1686 }
1689 LIRItem object(x->obj(), this);
1690 LIRItem value(x->value(), this);
1692 object.load_item();
1694 if (is_volatile || needs_patching) {
1695 // load item if field is volatile (fewer special cases for volatiles)
1696 // load item if field not initialized
1697 // load item if field not constant
1698 // because of code patching we cannot inline constants
1699 if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1700 value.load_byte_item();
1701 } else {
1702 value.load_item();
1703 }
1704 } else {
1705 value.load_for_store(field_type);
1706 }
1708 set_no_result(x);
1710 #ifndef PRODUCT
1711 if (PrintNotLoaded && needs_patching) {
1712 tty->print_cr(" ###class not loaded at store_%s bci %d",
1713 x->is_static() ? "static" : "field", x->printable_bci());
1714 }
1715 #endif
1717 if (x->needs_null_check() &&
1718 (needs_patching ||
1719 MacroAssembler::needs_explicit_null_check(x->offset()))) {
1720 // emit an explicit null check because the offset is too large
1721 __ null_check(object.result(), new CodeEmitInfo(info));
1722 }
1724 LIR_Address* address;
1725 if (needs_patching) {
1726 // we need to patch the offset in the instruction so don't allow
1727 // generate_address to try to be smart about emitting the -1.
1728 // Otherwise the patching code won't know how to find the
1729 // instruction to patch.
1730 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1731 } else {
1732 address = generate_address(object.result(), x->offset(), field_type);
1733 }
1735 if (is_volatile && os::is_MP()) {
1736 __ membar_release();
1737 }
1739 if (is_oop) {
1740 // Do the pre-write barrier, if any.
1741 pre_barrier(LIR_OprFact::address(address),
1742 LIR_OprFact::illegalOpr /* pre_val */,
1743 true /* do_load*/,
1744 needs_patching,
1745 (info ? new CodeEmitInfo(info) : NULL));
1746 }
1748 if (is_volatile && !needs_patching) {
1749 volatile_field_store(value.result(), address, info);
1750 } else {
1751 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1752 __ store(value.result(), address, info, patch_code);
1753 }
1755 if (is_oop) {
1756 // Store of an oop into the object, so mark the card of the object's header
1757 post_barrier(object.result(), value.result());
1758 }
1760 if (is_volatile && os::is_MP()) {
1761 __ membar();
1762 }
1763 }
1766 void LIRGenerator::do_LoadField(LoadField* x) {
1767 bool needs_patching = x->needs_patching();
1768 bool is_volatile = x->field()->is_volatile();
1769 BasicType field_type = x->field_type();
1771 CodeEmitInfo* info = NULL;
1772 if (needs_patching) {
1773 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1774 info = state_for(x, x->state_before());
1775 } else if (x->needs_null_check()) {
1776 NullCheck* nc = x->explicit_null_check();
1777 if (nc == NULL) {
1778 info = state_for(x);
1779 } else {
1780 info = state_for(nc);
1781 }
1782 }
1784 LIRItem object(x->obj(), this);
1786 object.load_item();
1788 #ifndef PRODUCT
1789 if (PrintNotLoaded && needs_patching) {
1790 tty->print_cr(" ###class not loaded at load_%s bci %d",
1791 x->is_static() ? "static" : "field", x->printable_bci());
1792 }
1793 #endif
1795 if (x->needs_null_check() &&
1796 (needs_patching ||
1797 MacroAssembler::needs_explicit_null_check(x->offset()))) {
1798 // emit an explicit null check because the offset is too large
1799 __ null_check(object.result(), new CodeEmitInfo(info));
1800 }
1802 LIR_Opr reg = rlock_result(x, field_type);
1803 LIR_Address* address;
1804 if (needs_patching) {
1805 // we need to patch the offset in the instruction so don't allow
1806 // generate_address to try to be smart about emitting the -1.
1807 // Otherwise the patching code won't know how to find the
1808 // instruction to patch.
1809 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1810 } else {
1811 address = generate_address(object.result(), x->offset(), field_type);
1812 }
1814 if (is_volatile && !needs_patching) {
1815 volatile_field_load(address, reg, info);
1816 } else {
1817 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1818 __ load(address, reg, info, patch_code);
1819 }
1821 if (is_volatile && os::is_MP()) {
1822 __ membar_acquire();
1823 }
1824 }
1827 //------------------------java.nio.Buffer.checkIndex------------------------
1829 // int java.nio.Buffer.checkIndex(int)
1830 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1831 // NOTE: by the time we are in checkIndex() we are guaranteed that
1832 // the buffer is non-null (because checkIndex is package-private and
1833 // only called from within other methods in the buffer).
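// In effect the range check below is the unsigned compare
//   if ((juint)index >= (juint)buf.limit) goto RangeCheckStub;
// with limit loaded from the Buffer object, so a negative index
// fails it as well.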
1834 assert(x->number_of_arguments() == 2, "wrong type");
1835 LIRItem buf (x->argument_at(0), this);
1836 LIRItem index(x->argument_at(1), this);
1837 buf.load_item();
1838 index.load_item();
1840 LIR_Opr result = rlock_result(x);
1841 if (GenerateRangeChecks) {
1842 CodeEmitInfo* info = state_for(x);
1843 CodeStub* stub = new RangeCheckStub(info, index.result(), true);
1844 if (index.result()->is_constant()) {
1845 cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1846 __ branch(lir_cond_belowEqual, T_INT, stub);
1847 } else {
1848 cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
1849 java_nio_Buffer::limit_offset(), T_INT, info);
1850 __ branch(lir_cond_aboveEqual, T_INT, stub);
1851 }
1852 __ move(index.result(), result);
1853 } else {
1854 // Just load the index into the result register
1855 __ move(index.result(), result);
1856 }
1857 }
1860 //------------------------array access--------------------------------------
1863 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
1864 LIRItem array(x->array(), this);
1865 array.load_item();
1866 LIR_Opr reg = rlock_result(x);
1868 CodeEmitInfo* info = NULL;
1869 if (x->needs_null_check()) {
1870 NullCheck* nc = x->explicit_null_check();
1871 if (nc == NULL) {
1872 info = state_for(x);
1873 } else {
1874 info = state_for(nc);
1875 }
1876 }
1877 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
1878 }
1881 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
1882 bool use_length = x->length() != NULL;
1883 LIRItem array(x->array(), this);
1884 LIRItem index(x->index(), this);
1885 LIRItem length(this);
1886 bool needs_range_check = true;
1888 if (use_length) {
1889 needs_range_check = x->compute_needs_range_check();
1890 if (needs_range_check) {
1891 length.set_instruction(x->length());
1892 length.load_item();
1893 }
1894 }
1896 array.load_item();
1897 if (index.is_constant() && can_inline_as_constant(x->index())) {
1898 // let it be a constant
1899 index.dont_load_item();
1900 } else {
1901 index.load_item();
1902 }
1904 CodeEmitInfo* range_check_info = state_for(x);
1905 CodeEmitInfo* null_check_info = NULL;
1906 if (x->needs_null_check()) {
1907 NullCheck* nc = x->explicit_null_check();
1908 if (nc != NULL) {
1909 null_check_info = state_for(nc);
1910 } else {
1911 null_check_info = range_check_info;
1912 }
1913 }
1915 // emit array address setup early so it schedules better
1916 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
1918 if (GenerateRangeChecks && needs_range_check) {
1919 if (use_length) {
1920 // TODO: use a (modified) version of array_range_check that does not require a
1921 // constant length to be loaded to a register
1922 __ cmp(lir_cond_belowEqual, length.result(), index.result());
1923 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1924 } else {
1925 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1926 // The range check performs the null check, so clear it out for the load
1927 null_check_info = NULL;
1928 }
1929 }
1931 __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
1932 }
1935 void LIRGenerator::do_NullCheck(NullCheck* x) {
1936 if (x->can_trap()) {
1937 LIRItem value(x->obj(), this);
1938 value.load_item();
1939 CodeEmitInfo* info = state_for(x);
1940 __ null_check(value.result(), info);
1941 }
1942 }
1945 void LIRGenerator::do_TypeCast(TypeCast* x) {
1946 LIRItem value(x->obj(), this);
1947 value.load_item();
1948 // the result is the same as from the node we are casting
1949 set_result(x, value.result());
1950 }
1953 void LIRGenerator::do_Throw(Throw* x) {
1954 LIRItem exception(x->exception(), this);
1955 exception.load_item();
1956 set_no_result(x);
1957 LIR_Opr exception_opr = exception.result();
1958 CodeEmitInfo* info = state_for(x, x->state());
1960 #ifndef PRODUCT
1961 if (PrintC1Statistics) {
1962 increment_counter(Runtime1::throw_count_address(), T_INT);
1963 }
1964 #endif
1966 // check if the instruction has an xhandler in any of the nested scopes
1967 bool unwind = false;
1968 if (info->exception_handlers()->length() == 0) {
1969 // this throw is not inside an xhandler
1970 unwind = true;
1971 } else {
1972 // get some idea of the throw type
1973 bool type_is_exact = true;
1974 ciType* throw_type = x->exception()->exact_type();
1975 if (throw_type == NULL) {
1976 type_is_exact = false;
1977 throw_type = x->exception()->declared_type();
1978 }
1979 if (throw_type != NULL && throw_type->is_instance_klass()) {
1980 ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
1981 unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
1982 }
1983 }
1985 // do null check before moving exception oop into fixed register
1986 // to avoid a fixed interval with an oop during the null check.
1987 // Use a copy of the CodeEmitInfo because debug information is
1988 // different for null_check and throw.
1989 if (GenerateCompilerNullChecks &&
1990 (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
1991 // if the exception object wasn't created using new then it might be null.
1992 __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
1993 }
1995 if (compilation()->env()->jvmti_can_post_on_exceptions()) {
1996 // we need to go through the exception lookup path to get JVMTI
1997 // notification done
1998 unwind = false;
1999 }
2001 // move exception oop into fixed register
2002 __ move(exception_opr, exceptionOopOpr());
2004 if (unwind) {
2005 __ unwind_exception(exceptionOopOpr());
2006 } else {
2007 __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
2008 }
2009 }
2012 void LIRGenerator::do_RoundFP(RoundFP* x) {
2013 LIRItem input(x->input(), this);
2014 input.load_item();
2015 LIR_Opr input_opr = input.result();
2016 assert(input_opr->is_register(), "why round if value is not in a register?");
2017 assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
2018 if (input_opr->is_single_fpu()) {
2019 set_result(x, round_item(input_opr)); // This code path not currently taken
2020 } else {
2021 LIR_Opr result = new_register(T_DOUBLE);
2022 set_vreg_flag(result, must_start_in_memory);
2023 __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
2024 set_result(x, result);
2025 }
2026 }
2028 void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
2029 LIRItem base(x->base(), this);
2030 LIRItem idx(this);
2032 base.load_item();
2033 if (x->has_index()) {
2034 idx.set_instruction(x->index());
2035 idx.load_nonconstant();
2036 }
2038 LIR_Opr reg = rlock_result(x, x->basic_type());
2040 int log2_scale = 0;
2041 if (x->has_index()) {
2042 assert(x->index()->type()->tag() == intTag, "should not find non-int index");
2043 log2_scale = x->log2_scale();
2044 }
2046 assert(!x->has_index() || idx.value() == x->index(), "should match");
2048 LIR_Opr base_op = base.result();
2049 #ifndef _LP64
2050 if (x->base()->type()->tag() == longTag) {
2051 base_op = new_register(T_INT);
2052 __ convert(Bytecodes::_l2i, base.result(), base_op);
2053 } else {
2054 assert(x->base()->type()->tag() == intTag, "must be");
2055 }
2056 #endif
2058 BasicType dst_type = x->basic_type();
2059 LIR_Opr index_op = idx.result();
2061 LIR_Address* addr;
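// Form addr = base + (index << log2_scale); a constant index (which
// requires log2_scale == 0) is folded into the displacement, and on
// LP64 an int index is first sign-extended (i2l) to a pointer-sized
// register (the ARM path delegates to generate_address).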
2062 if (index_op->is_constant()) {
2063 assert(log2_scale == 0, "must not have a scale");
2064 addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2065 } else {
2066 #ifdef X86
2067 #ifdef _LP64
2068 if (!index_op->is_illegal() && index_op->type() == T_INT) {
2069 LIR_Opr tmp = new_pointer_register();
2070 __ convert(Bytecodes::_i2l, index_op, tmp);
2071 index_op = tmp;
2072 }
2073 #endif
2074 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2075 #elif defined(ARM)
2076 addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2077 #else
2078 if (index_op->is_illegal() || log2_scale == 0) {
2079 #ifdef _LP64
2080 if (!index_op->is_illegal() && index_op->type() == T_INT) {
2081 LIR_Opr tmp = new_pointer_register();
2082 __ convert(Bytecodes::_i2l, index_op, tmp);
2083 index_op = tmp;
2084 }
2085 #endif
2086 addr = new LIR_Address(base_op, index_op, dst_type);
2087 } else {
2088 LIR_Opr tmp = new_pointer_register();
2089 __ shift_left(index_op, log2_scale, tmp);
2090 addr = new LIR_Address(base_op, tmp, dst_type);
2091 }
2092 #endif
2093 }
2095 if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2096 __ unaligned_move(addr, reg);
2097 } else {
2098 if (dst_type == T_OBJECT && x->is_wide()) {
2099 __ move_wide(addr, reg);
2100 } else {
2101 __ move(addr, reg);
2102 }
2103 }
2104 }
2107 void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
2108 int log2_scale = 0;
2109 BasicType type = x->basic_type();
2111 if (x->has_index()) {
2112 assert(x->index()->type()->tag() == intTag, "should not find non-int index");
2113 log2_scale = x->log2_scale();
2114 }
2116 LIRItem base(x->base(), this);
2117 LIRItem value(x->value(), this);
2118 LIRItem idx(this);
2120 base.load_item();
2121 if (x->has_index()) {
2122 idx.set_instruction(x->index());
2123 idx.load_item();
2124 }
2126 if (type == T_BYTE || type == T_BOOLEAN) {
2127 value.load_byte_item();
2128 } else {
2129 value.load_item();
2130 }
2132 set_no_result(x);
2134 LIR_Opr base_op = base.result();
2135 #ifndef _LP64
2136 if (x->base()->type()->tag() == longTag) {
2137 base_op = new_register(T_INT);
2138 __ convert(Bytecodes::_l2i, base.result(), base_op);
2139 } else {
2140 assert(x->base()->type()->tag() == intTag, "must be");
2141 }
2142 #endif
2144 LIR_Opr index_op = idx.result();
2145 if (log2_scale != 0) {
2146 // temporary fix (platform dependent code without shift on Intel would be better)
2147 index_op = new_pointer_register();
2148 #ifdef _LP64
2149 if (idx.result()->type() == T_INT) {
2150 __ convert(Bytecodes::_i2l, idx.result(), index_op);
2151 } else {
2152 #endif
2153 // TODO: ARM also allows embedded shift in the address
2154 __ move(idx.result(), index_op);
2155 #ifdef _LP64
2156 }
2157 #endif
2158 __ shift_left(index_op, log2_scale, index_op);
2159 }
2160 #ifdef _LP64
2161 else if (!index_op->is_illegal() && index_op->type() == T_INT) {
2162 LIR_Opr tmp = new_pointer_register();
2163 __ convert(Bytecodes::_i2l, index_op, tmp);
2164 index_op = tmp;
2165 }
2166 #endif
2168 LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2169 __ move(value.result(), addr);
2170 }
2173 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2174 BasicType type = x->basic_type();
2175 LIRItem src(x->object(), this);
2176 LIRItem off(x->offset(), this);
2178 off.load_item();
2179 src.load_item();
2181 LIR_Opr value = rlock_result(x, x->basic_type());
2183 get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2185 #if INCLUDE_ALL_GCS
2186 // We might be reading the value of the referent field of a
2187 // Reference object in order to attach it back to the live
2188 // object graph. If G1 is enabled then we need to record
2189 // the value that is being returned in an SATB log buffer.
2190 //
2191 // We need to generate code similar to the following...
2192 //
2193 // if (offset == java_lang_ref_Reference::referent_offset) {
2194 // if (src != NULL) {
2195 // if (klass(src)->reference_type() != REF_NONE) {
2196 // pre_barrier(..., value, ...);
2197 // }
2198 // }
2199 // }
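//
// The gen_* flags below prune these checks statically: a constant
// offset, a constant or array source, or a statically known klass can
// each eliminate its runtime check (or the whole barrier).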
2201 if (UseG1GC && type == T_OBJECT) {
2202 bool gen_pre_barrier = true; // Assume we need to generate pre_barrier.
2203 bool gen_offset_check = true; // Assume we need to generate the offset guard.
2204 bool gen_source_check = true; // Assume we need to check the src object for null.
2205 bool gen_type_check = true; // Assume we need to check the reference_type.
2207 if (off.is_constant()) {
2208 jlong off_con = (off.type()->is_int() ?
2209 (jlong) off.get_jint_constant() :
2210 off.get_jlong_constant());
2213 if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2214 // The constant offset is something other than referent_offset.
2215 // We can skip generating/checking the remaining guards and
2216 // skip generation of the code stub.
2217 gen_pre_barrier = false;
2218 } else {
2219 // The constant offset is the same as referent_offset -
2220 // we do not need to generate a runtime offset check.
2221 gen_offset_check = false;
2222 }
2223 }
2225 // We don't need to generate the stub if the source object is an array
2226 if (gen_pre_barrier && src.type()->is_array()) {
2227 gen_pre_barrier = false;
2228 }
2230 if (gen_pre_barrier) {
2231 // We still need to continue with the checks.
2232 if (src.is_constant()) {
2233 ciObject* src_con = src.get_jobject_constant();
2235 if (src_con->is_null_object()) {
2236 // The constant src object is null - we can skip
2237 // generating the code stub.
2238 gen_pre_barrier = false;
2239 } else {
2240 // Non-null constant source object. We still have to generate
2241 // the slow stub - but we don't need to generate the runtime
2242 // null object check.
2243 gen_source_check = false;
2244 }
2245 }
2246 }
2247 if (gen_pre_barrier && !PatchALot) {
2248 // Can the klass of the object be statically determined to be
2249 // a sub-class of Reference?
2250 ciType* type = src.value()->declared_type();
2251 if ((type != NULL) && type->is_loaded()) {
2252 if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
2253 gen_type_check = false;
2254 } else if (type->is_klass() &&
2255 !compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
2256 // Not Reference and not Object klass.
2257 gen_pre_barrier = false;
2258 }
2259 }
2260 }
2262 if (gen_pre_barrier) {
2263 LabelObj* Lcont = new LabelObj();
2265 // We may need to generate more than one runtime check here. Let's
2266 // start with the offset check.
2267 if (gen_offset_check) {
2268 // if (offset != referent_offset) -> continue
2269 // If offset is an int then we can do the comparison with the
2270 // referent_offset constant; otherwise we need to move
2271 // referent_offset into a temporary register and generate
2272 // a reg-reg compare.
2274 LIR_Opr referent_off;
2276 if (off.type()->is_int()) {
2277 referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
2278 } else {
2279 assert(off.type()->is_long(), "what else?");
2280 referent_off = new_register(T_LONG);
2281 __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
2282 }
2283 __ cmp(lir_cond_notEqual, off.result(), referent_off);
2284 __ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
2285 }
2286 if (gen_source_check) {
2287 // offset is a const and equals referent offset
2288 // if (source == null) -> continue
2289 __ cmp(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL));
2290 __ branch(lir_cond_equal, T_OBJECT, Lcont->label());
2291 }
2292 LIR_Opr src_klass = new_register(T_OBJECT);
2293 if (gen_type_check) {
2294 // We have determined that offset == referent_offset && src != null.
2295 // if (src->_klass->_reference_type == REF_NONE) -> continue
2296 __ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), UseCompressedKlassPointers ? T_OBJECT : T_ADDRESS), src_klass);
2297 LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
2298 LIR_Opr reference_type = new_register(T_INT);
2299 __ move(reference_type_addr, reference_type);
2300 __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
2301 __ branch(lir_cond_equal, T_INT, Lcont->label());
2302 }
2303 {
2304 // We have determined that src->_klass->_reference_type != REF_NONE
2305 // so register the value in the referent field with the pre-barrier.
2306 pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
2307 value /* pre_val */,
2308 false /* do_load */,
2309 false /* patch */,
2310 NULL /* info */);
2311 }
2312 __ branch_destination(Lcont->label());
2313 }
2314 }
2315 #endif // INCLUDE_ALL_GCS
2317 if (x->is_volatile() && os::is_MP()) __ membar_acquire();
2318 }
2321 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2322 BasicType type = x->basic_type();
2323 LIRItem src(x->object(), this);
2324 LIRItem off(x->offset(), this);
2325 LIRItem data(x->value(), this);
2327 src.load_item();
2328 if (type == T_BOOLEAN || type == T_BYTE) {
2329 data.load_byte_item();
2330 } else {
2331 data.load_item();
2332 }
2333 off.load_item();
2335 set_no_result(x);
2337 if (x->is_volatile() && os::is_MP()) __ membar_release();
2338 put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
2339 if (x->is_volatile() && os::is_MP()) __ membar();
2340 }
2343 void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
2344 LIRItem src(x->object(), this);
2345 LIRItem off(x->offset(), this);
2347 src.load_item();
2348 if (off.is_constant() && can_inline_as_constant(x->offset())) {
2349 // let it be a constant
2350 off.dont_load_item();
2351 } else {
2352 off.load_item();
2353 }
2355 set_no_result(x);
2357 LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
2358 __ prefetch(addr, is_store);
2359 }
2362 void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
2363 do_UnsafePrefetch(x, false);
2364 }
2367 void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
2368 do_UnsafePrefetch(x, true);
2369 }
2372 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2373 int lng = x->length();
2375 for (int i = 0; i < lng; i++) {
2376 SwitchRange* one_range = x->at(i);
2377 int low_key = one_range->low_key();
2378 int high_key = one_range->high_key();
2379 BlockBegin* dest = one_range->sux();
2380 if (low_key == high_key) {
2381 __ cmp(lir_cond_equal, value, low_key);
2382 __ branch(lir_cond_equal, T_INT, dest);
2383 } else if (high_key - low_key == 1) {
2384 __ cmp(lir_cond_equal, value, low_key);
2385 __ branch(lir_cond_equal, T_INT, dest);
2386 __ cmp(lir_cond_equal, value, high_key);
2387 __ branch(lir_cond_equal, T_INT, dest);
2388 } else {
2389 LabelObj* L = new LabelObj();
2390 __ cmp(lir_cond_less, value, low_key);
2391 __ branch(lir_cond_less, T_INT, L->label());
2392 __ cmp(lir_cond_lessEqual, value, high_key);
2393 __ branch(lir_cond_lessEqual, T_INT, dest);
2394 __ branch_destination(L->label());
2395 }
2396 }
2397 __ jump(default_sux);
2398 }
2401 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2402 SwitchRangeList* res = new SwitchRangeList();
2403 int len = x->length();
2404 if (len > 0) {
2405 BlockBegin* sux = x->sux_at(0);
2406 int key = x->lo_key();
2407 BlockBegin* default_sux = x->default_sux();
2408 SwitchRange* range = new SwitchRange(key, sux);
2409 for (int i = 0; i < len; i++, key++) {
2410 BlockBegin* new_sux = x->sux_at(i);
2411 if (sux == new_sux) {
2412 // still in same range
2413 range->set_high_key(key);
2414 } else {
2415 // skip tests which explicitly dispatch to the default
2416 if (sux != default_sux) {
2417 res->append(range);
2418 }
2419 range = new SwitchRange(key, new_sux);
2420 }
2421 sux = new_sux;
2422 }
2423 if (res->length() == 0 || res->last() != range) res->append(range);
2424 }
2425 return res;
2426 }
2429 // we expect the keys to be sorted by increasing value
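// e.g. keys {1, 2, 3, 10} with one successor for 1..3 collapse into the
// ranges [1,3] and [10,10]; where possible, ranges whose successor is the
// default block are dropped, since the trailing jump dispatches there anyway.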
2430 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2431 SwitchRangeList* res = new SwitchRangeList();
2432 int len = x->length();
2433 if (len > 0) {
2434 BlockBegin* default_sux = x->default_sux();
2435 int key = x->key_at(0);
2436 BlockBegin* sux = x->sux_at(0);
2437 SwitchRange* range = new SwitchRange(key, sux);
2438 for (int i = 1; i < len; i++) {
2439 int new_key = x->key_at(i);
2440 BlockBegin* new_sux = x->sux_at(i);
2441 if (key+1 == new_key && sux == new_sux) {
2442 // still in same range
2443 range->set_high_key(new_key);
2444 } else {
2445 // skip tests which explicitly dispatch to the default
2446 if (range->sux() != default_sux) {
2447 res->append(range);
2448 }
2449 range = new SwitchRange(new_key, new_sux);
2450 }
2451 key = new_key;
2452 sux = new_sux;
2453 }
2454 if (res->length() == 0 || res->last() != range) res->append(range);
2455 }
2456 return res;
2457 }
2460 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2461 LIRItem tag(x->tag(), this);
2462 tag.load_item();
2463 set_no_result(x);
2465 if (x->is_safepoint()) {
2466 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2467 }
2469 // move values into phi locations
2470 move_to_phi(x->state());
2472 int lo_key = x->lo_key();
2473 int hi_key = x->hi_key();
2474 int len = x->length();
2475 LIR_Opr value = tag.result();
2476 if (UseTableRanges) {
2477 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2478 } else {
2479 for (int i = 0; i < len; i++) {
2480 __ cmp(lir_cond_equal, value, i + lo_key);
2481 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2482 }
2483 __ jump(x->default_sux());
2484 }
2485 }
2488 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2489 LIRItem tag(x->tag(), this);
2490 tag.load_item();
2491 set_no_result(x);
2493 if (x->is_safepoint()) {
2494 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2495 }
2497 // move values into phi locations
2498 move_to_phi(x->state());
2500 LIR_Opr value = tag.result();
2501 if (UseTableRanges) {
2502 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2503 } else {
2504 int len = x->length();
2505 for (int i = 0; i < len; i++) {
2506 __ cmp(lir_cond_equal, value, x->key_at(i));
2507 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2508 }
2509 __ jump(x->default_sux());
2510 }
2511 }
2514 void LIRGenerator::do_Goto(Goto* x) {
2515 set_no_result(x);
2517 if (block()->next()->as_OsrEntry()) {
2518 // need to free up storage used for OSR entry point
2519 LIR_Opr osrBuffer = block()->next()->operand();
2520 BasicTypeList signature;
2521 signature.append(T_INT);
2522 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2523 __ move(osrBuffer, cc->args()->at(0));
2524 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2525 getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2526 }
2528 if (x->is_safepoint()) {
2529 ValueStack* state = x->state_before() ? x->state_before() : x->state();
2531 // increment backedge counter if needed
2532 CodeEmitInfo* info = state_for(x, state);
2533 increment_backedge_counter(info, x->profiled_bci());
2534 CodeEmitInfo* safepoint_info = state_for(x, state);
2535 __ safepoint(safepoint_poll_register(), safepoint_info);
2536 }
2538 // Gotos can be folded Ifs; handle this case.
2539 if (x->should_profile()) {
2540 ciMethod* method = x->profiled_method();
2541 assert(method != NULL, "method should be set if branch is profiled");
2542 ciMethodData* md = method->method_data_or_null();
2543 assert(md != NULL, "Sanity");
2544 ciProfileData* data = md->bci_to_data(x->profiled_bci());
2545 assert(data != NULL, "must have profiling data");
2546 int offset;
2547 if (x->direction() == Goto::taken) {
2548 assert(data->is_BranchData(), "need BranchData for two-way branches");
2549 offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2550 } else if (x->direction() == Goto::not_taken) {
2551 assert(data->is_BranchData(), "need BranchData for two-way branches");
2552 offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2553 } else {
2554 assert(data->is_JumpData(), "need JumpData for branches");
2555 offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2556 }
2557 LIR_Opr md_reg = new_register(T_METADATA);
2558 __ metadata2reg(md->constant_encoding(), md_reg);
2560 increment_counter(new LIR_Address(md_reg, offset,
2561 NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2562 }
2564 // emit phi-instruction move after safepoint since this simplifies
2565 // describing the state at the safepoint.
2566 move_to_phi(x->state());
2568 __ jump(x->default_sux());
2569 }
2572 void LIRGenerator::do_Base(Base* x) {
2573 __ std_entry(LIR_OprFact::illegalOpr);
2574 // Emit moves from physical registers / stack slots to virtual registers
2575 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2576 IRScope* irScope = compilation()->hir()->top_scope();
2577 int java_index = 0;
2578 for (int i = 0; i < args->length(); i++) {
2579 LIR_Opr src = args->at(i);
2580 assert(!src->is_illegal(), "check");
2581 BasicType t = src->type();
2583 // Types which are smaller than int are passed as int, so
2584 // correct the type that was passed.
2585 switch (t) {
2586 case T_BYTE:
2587 case T_BOOLEAN:
2588 case T_SHORT:
2589 case T_CHAR:
2590 t = T_INT;
2591 break;
2592 }
2594 LIR_Opr dest = new_register(t);
2595 __ move(src, dest);
2597 // Assign new location to Local instruction for this local
2598 Local* local = x->state()->local_at(java_index)->as_Local();
2599 assert(local != NULL, "Locals for incoming arguments must have been created");
2600 #ifndef __SOFTFP__
2601 // The java calling convention passes double as long and float as int.
2602 assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2603 #endif // __SOFTFP__
2604 local->set_operand(dest);
2605 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
2606 java_index += type2size[t];
2607 }
2609 if (compilation()->env()->dtrace_method_probes()) {
2610 BasicTypeList signature;
2611 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
2612 signature.append(T_OBJECT); // Method*
2613 LIR_OprList* args = new LIR_OprList();
2614 args->append(getThreadPointer());
2615 LIR_Opr meth = new_register(T_METADATA);
2616 __ metadata2reg(method()->constant_encoding(), meth);
2617 args->append(meth);
2618 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2619 }
2621 if (method()->is_synchronized()) {
2622 LIR_Opr obj;
2623 if (method()->is_static()) {
2624 obj = new_register(T_OBJECT);
2625 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2626 } else {
2627 Local* receiver = x->state()->local_at(0)->as_Local();
2628 assert(receiver != NULL, "must already exist");
2629 obj = receiver->operand();
2630 }
2631 assert(obj->is_valid(), "must be valid");
2633 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2634 LIR_Opr lock = new_register(T_INT);
2635 __ load_stack_address_monitor(0, lock);
2637 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
2638 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2640 // receiver is guaranteed non-NULL so don't need CodeEmitInfo
2641 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2642 }
2643 }
2645 // increment invocation counters if needed
2646 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2647 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
2648 increment_invocation_counter(info);
2649 }
2651 // all blocks with a successor must end with an unconditional jump
2652 // to the successor even if they are consecutive
2653 __ jump(x->default_sux());
2654 }
2657 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2658 // construct our frame and model the production of incoming pointer
2659 // to the OSR buffer.
2660 __ osr_entry(LIR_Assembler::osrBufferPointer());
2661 LIR_Opr result = rlock_result(x);
2662 __ move(LIR_Assembler::osrBufferPointer(), result);
2663 }
2666 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2667 assert(args->length() == arg_list->length(),
2668 err_msg_res("args=%d, arg_list=%d", args->length(), arg_list->length()));
2669 for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2670 LIRItem* param = args->at(i);
2671 LIR_Opr loc = arg_list->at(i);
2672 if (loc->is_register()) {
2673 param->load_item_force(loc);
2674 } else {
2675 LIR_Address* addr = loc->as_address_ptr();
2676 param->load_for_store(addr->type());
2677 if (addr->type() == T_OBJECT) {
2678 __ move_wide(param->result(), addr);
2679 } else
2680 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2681 __ unaligned_move(param->result(), addr);
2682 } else {
2683 __ move(param->result(), addr);
2684 }
2685 }
2686 }
2688 if (x->has_receiver()) {
2689 LIRItem* receiver = args->at(0);
2690 LIR_Opr loc = arg_list->at(0);
2691 if (loc->is_register()) {
2692 receiver->load_item_force(loc);
2693 } else {
2694 assert(loc->is_address(), "just checking");
2695 receiver->load_for_store(T_OBJECT);
2696 __ move_wide(receiver->result(), loc->as_address_ptr());
2697 }
2698 }
2699 }
2702 // Visits all arguments, returns appropriate items without loading them
2703 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2704 LIRItemList* argument_items = new LIRItemList();
2705 if (x->has_receiver()) {
2706 LIRItem* receiver = new LIRItem(x->receiver(), this);
2707 argument_items->append(receiver);
2708 }
2709 for (int i = 0; i < x->number_of_arguments(); i++) {
2710 LIRItem* param = new LIRItem(x->argument_at(i), this);
2711 argument_items->append(param);
2712 }
2713 return argument_items;
2714 }
2717 // The invoke with receiver has the following phases:
2718 // a) traverse and load/lock receiver;
2719 // b) traverse all arguments -> item-array (invoke_visit_argument)
2720 // c) push receiver on stack
2721 // d) load each of the items and push on stack
2722 // e) unlock receiver
2723 // f) move receiver into receiver-register %o0
2724 // g) lock result registers and emit call operation
2725 //
2726 // Before issuing a call, we must spill-save all values on stack
2727 // that are in caller-save registers. "spill-save" moves those registers
2728 // either into a free callee-save register or spills them if no free
2729 // callee-save register is available.
2730 //
2731 // The problem is where to invoke spill-save.
2732 // - if invoked between e) and f), we may lock callee save
2733 // register in "spill-save" that destroys the receiver register
2734 // before f) is executed
2735 // - if we rearrange f) to be earlier, by loading %o0, it
2736 // may destroy a value on the stack that is currently in %o0
2737 // and is waiting to be spilled
2738 // - if we keep the receiver locked while doing spill-save,
2739 // we cannot spill it as it is spill-locked
2740 //
2741 void LIRGenerator::do_Invoke(Invoke* x) {
2742 CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2744 LIR_OprList* arg_list = cc->args();
2745 LIRItemList* args = invoke_visit_arguments(x);
2746 LIR_Opr receiver = LIR_OprFact::illegalOpr;
2748 // setup result register
2749 LIR_Opr result_register = LIR_OprFact::illegalOpr;
2750 if (x->type() != voidType) {
2751 result_register = result_register_for(x->type());
2752 }
2754 CodeEmitInfo* info = state_for(x, x->state());
2756 invoke_load_arguments(x, args, arg_list);
2758 if (x->has_receiver()) {
2759 args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2760 receiver = args->at(0)->result();
2761 }
2763 // emit invoke code
2764 bool optimized = x->target_is_loaded() && x->target_is_final();
2765 assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2767 // JSR 292
2768 // Preserve the SP over MethodHandle call sites.
2769 ciMethod* target = x->target();
2770 bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
2771 target->is_method_handle_intrinsic() ||
2772 target->is_compiled_lambda_form());
2773 if (is_method_handle_invoke) {
2774 info->set_is_method_handle_invoke(true);
2775 __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
2776 }
2778 switch (x->code()) {
2779 case Bytecodes::_invokestatic:
2780 __ call_static(target, result_register,
2781 SharedRuntime::get_resolve_static_call_stub(),
2782 arg_list, info);
2783 break;
2784 case Bytecodes::_invokespecial:
2785 case Bytecodes::_invokevirtual:
2786 case Bytecodes::_invokeinterface:
2787 // for final target we still produce an inline cache, in order
2788 // to be able to call mixed mode
2789 if (x->code() == Bytecodes::_invokespecial || optimized) {
2790 __ call_opt_virtual(target, receiver, result_register,
2791 SharedRuntime::get_resolve_opt_virtual_call_stub(),
2792 arg_list, info);
2793 } else if (x->vtable_index() < 0) {
2794 __ call_icvirtual(target, receiver, result_register,
2795 SharedRuntime::get_resolve_virtual_call_stub(),
2796 arg_list, info);
2797 } else {
2798 int entry_offset = InstanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2799 int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2800 __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2801 }
2802 break;
2803 case Bytecodes::_invokedynamic: {
2804 __ call_dynamic(target, receiver, result_register,
2805 SharedRuntime::get_resolve_static_call_stub(),
2806 arg_list, info);
2807 break;
2808 }
2809 default:
2810 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
2811 break;
2812 }
2814 // JSR 292
2815 // Restore the SP after MethodHandle call sites.
2816 if (is_method_handle_invoke) {
2817 __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
2818 }
2820 if (x->type()->is_float() || x->type()->is_double()) {
2821 // Force rounding of results from non-strictfp when in strictfp
2822 // scope (or when we don't know the strictness of the callee, to
2823 // be safe.)
2824 if (method()->is_strict()) {
2825 if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2826 result_register = round_item(result_register);
2827 }
2828 }
2829 }
2831 if (result_register->is_valid()) {
2832 LIR_Opr result = rlock_result(x);
2833 __ move(result_register, result);
2834 }
2835 }
2838 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
2839 assert(x->number_of_arguments() == 1, "wrong type");
2840 LIRItem value (x->argument_at(0), this);
2841 LIR_Opr reg = rlock_result(x);
2842 value.load_item();
2843 LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
2844 __ move(tmp, reg);
2845 }
2849 // Code for : x->x() {x->cond()} x->y() ? x->tval() : x->fval()
2850 void LIRGenerator::do_IfOp(IfOp* x) {
2851 #ifdef ASSERT
2852 {
2853 ValueTag xtag = x->x()->type()->tag();
2854 ValueTag ttag = x->tval()->type()->tag();
2855 assert(xtag == intTag || xtag == objectTag, "cannot handle others");
2856 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
2857 assert(ttag == x->fval()->type()->tag(), "cannot handle others");
2858 }
2859 #endif
2861 LIRItem left(x->x(), this);
2862 LIRItem right(x->y(), this);
2863 left.load_item();
2864 if (can_inline_as_constant(right.value())) {
2865 right.dont_load_item();
2866 } else {
2867 right.load_item();
2868 }
2870 LIRItem t_val(x->tval(), this);
2871 LIRItem f_val(x->fval(), this);
2872 t_val.dont_load_item();
2873 f_val.dont_load_item();
2874 LIR_Opr reg = rlock_result(x);
2876 __ cmp(lir_cond(x->cond()), left.result(), right.result());
2877 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
2878 }
2880 void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
2881 assert(x->number_of_arguments() == expected_arguments, "wrong type");
2882 LIR_Opr reg = result_register_for(x->type());
2883 __ call_runtime_leaf(routine, getThreadTemp(),
2884 reg, new LIR_OprList());
2885 LIR_Opr result = rlock_result(x);
2886 __ move(reg, result);
2887 }
2889 #ifdef TRACE_HAVE_INTRINSICS
2890 void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
2891 LIR_Opr thread = getThreadPointer();
2892 LIR_Opr osthread = new_pointer_register();
2893 __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
2894 size_t thread_id_size = OSThread::thread_id_size();
2895 if (thread_id_size == (size_t) BytesPerLong) {
2896 LIR_Opr id = new_register(T_LONG);
2897 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id);
2898 __ convert(Bytecodes::_l2i, id, rlock_result(x));
2899 } else if (thread_id_size == (size_t) BytesPerInt) {
2900 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x));
2901 } else {
2902 ShouldNotReachHere();
2903 }
2904 }
2906 void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
2907 CodeEmitInfo* info = state_for(x);
2908 CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
2909 BasicType klass_pointer_type = NOT_LP64(T_INT) LP64_ONLY(T_LONG);
2910 assert(info != NULL, "must have info");
2911 LIRItem arg(x->argument_at(1), this);
2912 arg.load_item();
2913 LIR_Opr klass = new_pointer_register();
2914 __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), klass_pointer_type), klass, info);
2915 LIR_Opr id = new_register(T_LONG);
2916 ByteSize offset = TRACE_ID_OFFSET;
2917 LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
2918 __ move(trace_id_addr, id);
2919 __ logical_or(id, LIR_OprFact::longConst(0x01l), id);
2920 __ store(id, trace_id_addr);
2921 __ logical_and(id, LIR_OprFact::longConst(~0x3l), id);
2922 __ move(id, rlock_result(x));
2923 }
2924 #endif
2926 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
2927 switch (x->id()) {
2928 case vmIntrinsics::_intBitsToFloat :
2929 case vmIntrinsics::_doubleToRawLongBits :
2930 case vmIntrinsics::_longBitsToDouble :
2931 case vmIntrinsics::_floatToRawIntBits : {
2932 do_FPIntrinsics(x);
2933 break;
2934 }
2936 #ifdef TRACE_HAVE_INTRINSICS
2937 case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break;
2938 case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break;
2939 case vmIntrinsics::_counterTime:
2940 do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), 0, x);
2941 break;
2942 #endif
2944 case vmIntrinsics::_currentTimeMillis:
2945 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), 0, x);
2946 break;
2948 case vmIntrinsics::_nanoTime:
2949 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), 0, x);
2950 break;
2952 case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break;
2953 case vmIntrinsics::_isInstance: do_isInstance(x); break;
2954 case vmIntrinsics::_getClass: do_getClass(x); break;
2955 case vmIntrinsics::_currentThread: do_currentThread(x); break;
2957 case vmIntrinsics::_dlog: // fall through
2958 case vmIntrinsics::_dlog10: // fall through
2959 case vmIntrinsics::_dabs: // fall through
2960 case vmIntrinsics::_dsqrt: // fall through
2961 case vmIntrinsics::_dtan: // fall through
2962 case vmIntrinsics::_dsin : // fall through
2963 case vmIntrinsics::_dcos : // fall through
2964 case vmIntrinsics::_dexp : // fall through
2965 case vmIntrinsics::_dpow : do_MathIntrinsic(x); break;
2966 case vmIntrinsics::_arraycopy: do_ArrayCopy(x); break;
2968 // java.nio.Buffer.checkIndex
2969 case vmIntrinsics::_checkIndex: do_NIOCheckIndex(x); break;
2971 case vmIntrinsics::_compareAndSwapObject:
2972 do_CompareAndSwap(x, objectType);
2973 break;
2974 case vmIntrinsics::_compareAndSwapInt:
2975 do_CompareAndSwap(x, intType);
2976 break;
2977 case vmIntrinsics::_compareAndSwapLong:
2978 do_CompareAndSwap(x, longType);
2979 break;
2981 case vmIntrinsics::_loadFence :
2982 if (os::is_MP()) __ membar_acquire();
2983 break;
2984 case vmIntrinsics::_storeFence:
2985 if (os::is_MP()) __ membar_release();
2986 break;
2987 case vmIntrinsics::_fullFence :
2988 if (os::is_MP()) __ membar();
2989 break;
2991 case vmIntrinsics::_Reference_get:
2992 do_Reference_get(x);
2993 break;
2995 default: ShouldNotReachHere(); break;
2996 }
2997 }
2999 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
3000 // Need recv in a temporary register so it interferes with the other temporaries
3001 LIR_Opr recv = LIR_OprFact::illegalOpr;
3002 LIR_Opr mdo = new_register(T_OBJECT);
3003 // tmp is used to hold the counters on SPARC
3004 LIR_Opr tmp = new_pointer_register();
3005 if (x->recv() != NULL) {
3006 LIRItem value(x->recv(), this);
3007 value.load_item();
3008 recv = new_register(T_OBJECT);
3009 __ move(value.result(), recv);
3010 }
3011 __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3012 }
3014 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3015 // We can safely ignore accessors here, since c2 will inline them anyway;
3016 // accessors are also always mature.
3017 if (!x->inlinee()->is_accessor()) {
3018 CodeEmitInfo* info = state_for(x, x->state(), true);
3019 // Notify the runtime very infrequently only to take care of counter overflows
3020 increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true);
3021 }
3022 }
3024 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
3025 int freq_log;
3026 int level = compilation()->env()->comp_level();
3027 if (level == CompLevel_limited_profile) {
3028 freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3029 } else if (level == CompLevel_full_profile) {
3030 freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3031 } else {
3032 ShouldNotReachHere();
3033 }
3034 // Increment the appropriate invocation/backedge counter and notify the runtime.
3035 increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
3036 }
3038 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3039 ciMethod *method, int frequency,
3040 int bci, bool backedge, bool notify) {
3041 assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^n - 1 or 0");
3042 int level = _compilation->env()->comp_level();
3043 assert(level > CompLevel_simple, "Shouldn't be here");
3045 int offset = -1;
3046 LIR_Opr counter_holder = new_register(T_METADATA);
3047 LIR_Opr meth;
3048 if (level == CompLevel_limited_profile) {
3049 offset = in_bytes(backedge ? Method::backedge_counter_offset() :
3050 Method::invocation_counter_offset());
3051 __ metadata2reg(method->constant_encoding(), counter_holder);
3052 meth = counter_holder;
3053 } else if (level == CompLevel_full_profile) {
3054 offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
3055 MethodData::invocation_counter_offset());
3056 ciMethodData* md = method->method_data_or_null();
3057 assert(md != NULL, "Sanity");
3058 __ metadata2reg(md->constant_encoding(), counter_holder);
3059 meth = new_register(T_METADATA);
3060 __ metadata2reg(method->constant_encoding(), meth);
3061 } else {
3062 ShouldNotReachHere();
3063 }
3064 LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3065 LIR_Opr result = new_register(T_INT);
3066 __ load(counter, result);
3067 __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
3068 __ store(result, counter);
3069 if (notify) {
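// Sketch: with frequency == 2^n - 1 the masked compare below reads zero
// once every 2^n increments (counts are kept shifted left by count_shift),
// and only then do we branch to the overflow stub to notify the runtime.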
3070 LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
3071 __ logical_and(result, mask, result);
3072 __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3073 // The bci for the info can point to the cmp of an if; we want the bci of the if itself
3074 CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3075 __ branch(lir_cond_equal, T_INT, overflow);
3076 __ branch_destination(overflow->continuation());
3077 }
3078 }
3080 void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3081 LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3082 BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3084 if (x->pass_thread()) {
3085 signature->append(T_ADDRESS);
3086 args->append(getThreadPointer());
3087 }
3089 for (int i = 0; i < x->number_of_arguments(); i++) {
3090 Value a = x->argument_at(i);
3091 LIRItem* item = new LIRItem(a, this);
3092 item->load_item();
3093 args->append(item->result());
3094 signature->append(as_BasicType(a->type()));
3095 }
3097 LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
3098 if (x->type() == voidType) {
3099 set_no_result(x);
3100 } else {
3101 __ move(result, rlock_result(x));
3102 }
3103 }
3105 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
3106 LIRItemList args(1);
3107 LIRItem value(arg1, this);
3108 args.append(&value);
3109 BasicTypeList signature;
3110 signature.append(as_BasicType(arg1->type()));
3112 return call_runtime(&signature, &args, entry, result_type, info);
3113 }
3116 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
3117 LIRItemList args(2);
3118 LIRItem value1(arg1, this);
3119 LIRItem value2(arg2, this);
3120 args.append(&value1);
3121 args.append(&value2);
3122 BasicTypeList signature;
3123 signature.append(as_BasicType(arg1->type()));
3124 signature.append(as_BasicType(arg2->type()));
3126 return call_runtime(&signature, &args, entry, result_type, info);
3127 }
3130 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
3131 address entry, ValueType* result_type, CodeEmitInfo* info) {
3132 // get a result register
3133 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3134 LIR_Opr result = LIR_OprFact::illegalOpr;
3135 if (result_type->tag() != voidTag) {
3136 result = new_register(result_type);
3137 phys_reg = result_register_for(result_type);
3138 }
3140 // move the arguments into the correct location
3141 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3142 assert(cc->length() == args->length(), "argument mismatch");
3143 for (int i = 0; i < args->length(); i++) {
3144 LIR_Opr arg = args->at(i);
3145 LIR_Opr loc = cc->at(i);
3146 if (loc->is_register()) {
3147 __ move(arg, loc);
3148 } else {
3149 LIR_Address* addr = loc->as_address_ptr();
3150 // if (!can_store_as_constant(arg)) {
3151 // LIR_Opr tmp = new_register(arg->type());
3152 // __ move(arg, tmp);
3153 // arg = tmp;
3154 // }
3155 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3156 __ unaligned_move(arg, addr);
3157 } else {
3158 __ move(arg, addr);
3159 }
3160 }
3161 }
3163 if (info) {
3164 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3165 } else {
3166 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3167 }
3168 if (result->is_valid()) {
3169 __ move(phys_reg, result);
3170 }
3171 return result;
3172 }
3175 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
3176 address entry, ValueType* result_type, CodeEmitInfo* info) {
3177 // get a result register
3178 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3179 LIR_Opr result = LIR_OprFact::illegalOpr;
3180 if (result_type->tag() != voidTag) {
3181 result = new_register(result_type);
3182 phys_reg = result_register_for(result_type);
3183 }
3185 // move the arguments into the correct location
3186 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3188 assert(cc->length() == args->length(), "argument mismatch");
3189 for (int i = 0; i < args->length(); i++) {
3190 LIRItem* arg = args->at(i);
3191 LIR_Opr loc = cc->at(i);
3192 if (loc->is_register()) {
3193 arg->load_item_force(loc);
3194 } else {
3195 LIR_Address* addr = loc->as_address_ptr();
3196 arg->load_for_store(addr->type());
3197 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3198 __ unaligned_move(arg->result(), addr);
3199 } else {
3200 __ move(arg->result(), addr);
3201 }
3202 }
3203 }
3205 if (info) {
3206 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3207 } else {
3208 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3209 }
3210 if (result->is_valid()) {
3211 __ move(phys_reg, result);
3212 }
3213 return result;
3214 }
3216 void LIRGenerator::do_MemBar(MemBar* x) {
3217 if (os::is_MP()) {
3218 LIR_Code code = x->code();
3219 switch(code) {
3220 case lir_membar_acquire : __ membar_acquire(); break;
3221 case lir_membar_release : __ membar_release(); break;
3222 case lir_membar : __ membar(); break;
3223 case lir_membar_loadload : __ membar_loadload(); break;
3224 case lir_membar_storestore: __ membar_storestore(); break;
3225 case lir_membar_loadstore : __ membar_loadstore(); break;
3226 case lir_membar_storeload : __ membar_storeload(); break;
3227 default : ShouldNotReachHere(); break;
3228 }
3229 }
3230 }