Fri, 11 Mar 2011 22:34:57 -0800
7012648: move JSR 292 to package java.lang.invoke and adjust names
Summary: package and class renaming only; delete unused methods and classes
Reviewed-by: twisti
/*
 * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciCPCache.hpp"
#include "ci/ciInstance.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/bitMap.inline.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/heapRegion.hpp"
#endif

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// TODO: ARM - Use some recognizable constant which still fits architectural constraints
#ifdef ARM
#define PATCHED_ADDR (204)
#else
#define PATCHED_ADDR (max_jint)
#endif

void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}


//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3
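
// A minimal stand-alone sketch of the cycle case above (illustrative only,
// not part of the VM; the helper name is made up). Plain ints stand in for
// virtual registers: the naive sequence r1 := r2; r2 := r1 would clobber r1,
// so the resolver first saves one end of the cycle in a temp.
static void illustrate_cycle_resolution() {
  int r1 = 1, r2 = 2;
  int temp = r1;   // temp := r1   (break the cycle)
  r1 = r2;         // r1   := r2
  r2 = temp;       // r2   := temp
  assert(r1 == 2 && r2 == 1, "swap completed without losing a value");
}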
PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}
// Traverse the assignment graph in depth-first order and generate moves in post order,
// i.e. two assignments: b := c, a := b start with node c:
// Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
// Generates moves in this order: move b to a and move c to b
// i.e. cycle a := b, b := a starts with node a
// Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
// Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}


PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}
ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}
void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}
void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}


//--------------------------------------------------------------


void LIRGenerator::init() {
  _bs = Universe::heap()->barrier_set();
}


void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}
void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}
void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}
//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be a root
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}


// This is called for each node in the tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when we encounter a root
  if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}
CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != NULL, "state must be defined");

  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter(), "only other case is MonitorEnter");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers());
}
CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}


void LIRGenerator::jobject2reg_with_patching(LIR_Opr r, ciObject* obj, CodeEmitInfo* info) {
  if (!obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ oop2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ oop2reg(obj->constant_encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
}
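
// Note the unsigned conditions (belowEqual/aboveEqual) above: since an array
// length is never negative, one unsigned compare rejects both negative and
// too-large indices. A stand-alone sketch of the trick (hypothetical helper,
// not VM code):
static bool index_in_bounds(jint index, jint length) {
  // a negative index becomes a huge unsigned value, so a single unsigned
  // compare covers "index < 0" as well as "index >= length"
  return (juint)index < (juint)length;
}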
void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
  __ move(index, result);
}


void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          int c = right->as_jint();
          if (is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        if (is_strictfp) {
          __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div (left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}
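
// The _imul case above strength-reduces a multiply by a power-of-2 constant
// into a shift. A stand-alone sketch of the transformation (hypothetical
// helper, not VM code):
static jint multiply_by_constant(jint x, jint c) {
  if (c > 0 && is_power_of_2(c)) {
    return x << exact_log2(c);  // e.g. x * 8 == x << 3
  }
  return x * c;                 // no reduction available
}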
void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
    case Bytecodes::_ishl:
    case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
    case Bytecodes::_ishr:
    case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
    case Bytecodes::_iushr:
    case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
    default: ShouldNotReachHere();
  }
}


void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:  __ logical_or(left_op, right_op, result_op); break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}


void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  jobject2reg_with_patching(klass_reg, klass, info);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}
void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;
  if (expected_type != NULL) {
    // try to skip null checks
    if (src->as_NewArray() != NULL)
      flags &= ~LIR_OpArrayCopy::src_null_check;
    if (dst->as_NewArray() != NULL)
      flags &= ~LIR_OpArrayCopy::dst_null_check;

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}
LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()], "size mismatch");
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}

void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_OBJECT);
    __ oop2reg(md->constant_encoding(), md_reg);

    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}
// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated with Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double, ...) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At exit of a basic block, we fill the register(s) and spill slots.
// At entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.
// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    LIR_Opr operand = cur_val->operand();
    if (cur_val->operand()->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}


LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically. This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  return LIR_OprFact::virtual_register(vreg, type);
}


// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }

  set_result(x, reg);
  return reg;
}


//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}
void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block. After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}


void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
    signature.append(T_OBJECT);                          // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}


// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
  __ move_wide(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
                               klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
}
// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}


void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}


//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}


Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}


void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}


// Block local constant handling. This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers. Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}
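
// The T_INT/T_FLOAT and T_LONG/T_DOUBLE cases above deliberately compare raw
// bit patterns rather than numeric values: bitwise equality keeps -0.0f
// distinct from +0.0f and lets a NaN constant match itself, both of which a
// floating-point compare would get wrong. A stand-alone sketch (hypothetical
// helper, not VM code):
static bool same_float_constant(jfloat a, jfloat b) {
  return jint_cast(a) == jint_cast(b);  // compare the 32 raw bits
}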
// Various barriers

void LIRGenerator::pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
  switch (_bs->kind()) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_pre_barrier(addr_opr, patch, info);
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      // No pre barriers
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No pre barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_post_barrier(addr, new_val);
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr, new_val);
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No post barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC

void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
  if (G1DisablePreBarrier) return;

  // First we test whether marking is in progress.
  BasicType flag_type;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    flag_type = T_BYTE;
  }
  LIR_Opr thrd = getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(JavaThread::satb_mark_queue_offset() +
                             PtrQueue::byte_offset_of_active()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);

  LIR_PatchCode pre_val_patch_code =
    patch ? lir_patch_normal : lir_patch_none;

  LIR_Opr pre_val = new_register(T_OBJECT);

  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
  if (!addr_opr->is_address()) {
    assert(addr_opr->is_register(), "must be");
    addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
  }
  CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code,
                                        info);
  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}
void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  if (G1DisablePostBarrier) return;

  // If the "new_val" is a constant NULL, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == NULL) return;

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_register(T_OBJECT);
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr xor_res = new_pointer_register();
  LIR_Opr xor_shift_res = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, xor_res);
    __ logical_xor(xor_res, new_val, xor_res);
    __ move(xor_res, xor_shift_res);
    __ unsigned_shift_right(xor_shift_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  } else {
    __ logical_xor(addr, new_val, xor_res);
    __ unsigned_shift_right(xor_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  }

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    __ leal(new_val, new_val_reg);
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));

  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
  __ branch_destination(slow->continuation());
}
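
// The xor/shift sequence above is G1's cross-region filter: the post barrier
// only needs to run when the updated field and the new value live in
// different heap regions. A sketch of the same test on raw addresses
// (hypothetical helper, not VM code):
static bool crosses_region(intptr_t field_addr, intptr_t new_val_addr) {
  // same-region addresses agree on all bits above the region-size log,
  // so the shifted xor is zero exactly for same-region stores
  return ((field_addr ^ new_val_addr) >> HeapRegion::LogOfHRGrainBytes) != 0;
}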
#endif // SERIALGC
////////////////////////////////////////////////////////////////////////

void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {

  assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_register(T_OBJECT);
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

#ifdef ARM
  // TODO: ARM - move to platform-dependent code
  LIR_Opr tmp = FrameMap::R14_opr;
  if (VM_Version::supports_movw()) {
    __ move((LIR_Opr)card_table_base, tmp);
  } else {
    __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
  }

  CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
  LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
  if (((int)ct->byte_map_base & 0xff) == 0) {
    __ move(tmp, card_addr);
  } else {
    LIR_Opr tmp_zero = new_register(T_INT);
    __ move(LIR_OprFact::intConst(0), tmp_zero);
    __ move(tmp_zero, card_addr);
  }
#else // ARM
  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  }
  if (can_inline_as_constant(card_table_base)) {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
  } else {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, load_constant(card_table_base),
                            T_BYTE));
  }
#endif // ARM
}
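
// Card marking in a nutshell: the table keeps one byte per 2^card_shift bytes
// of heap (card_shift is 9, i.e. 512-byte cards, in the usual configuration),
// and a store dirties the card covering the updated oop field by writing 0 to
// byte_map_base + (addr >> card_shift). A stand-alone sketch (hypothetical
// helper, not VM code):
static void dirty_card(jbyte* byte_map_base, intptr_t field_addr) {
  byte_map_base[field_addr >> CardTableModRefBS::card_shift] = 0;  // 0 == dirty
}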
//------------------------field access--------------------------------------

// Comment copied from templateTable_i486.cpp
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
// order.  Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized with respect to each other.
// ALSO reads & writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
// the read float up to before the read.  It's OK for non-volatile memory refs
// that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
// that happen BEFORE the write float down to after the write.  It's OK for
// non-volatile memory refs that happen after the volatile write to float up
// before it.
//
// We only put in barriers around volatile refs (they are expensive), not
// _between_ memory refs (that would require us to track the flavor of the
// previous memory refs).  Requirements (2) and (3) require some barriers
// before volatile stores and after volatile loads.  These nearly cover
// requirement (1) but miss the volatile-store-volatile-load case.  This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.
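
// In barrier terms, the rules above come out as emitted by do_StoreField and
// do_LoadField below (a condensed sketch of the order, not additional code):
//
//   volatile store:            volatile load:
//     membar_release             load  [field] -> reg
//     store reg -> [field]       membar_acquire
//     membar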
void LIRGenerator::do_StoreField(StoreField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();
  bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);

  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
  }


  LIRItem object(x->obj(), this);
  LIRItem value(x->value(), this);

  object.load_item();

  if (is_volatile || needs_patching) {
    // load item if field is volatile (fewer special cases for volatiles)
    // load item if field not initialized
    // load item if field not constant
    // because of code patching we cannot inline constants
    if (field_type == T_BYTE || field_type == T_BOOLEAN) {
      value.load_byte_item();
    } else {
      value.load_item();
    }
  } else {
    value.load_for_store(field_type);
  }

  set_no_result(x);

#ifndef PRODUCT
  if (PrintNotLoaded && needs_patching) {
    tty->print_cr("   ###class not loaded at store_%s bci %d",
                  x->is_static() ? "static" : "field", x->printable_bci());
  }
#endif

  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // emit an explicit null check because the offset is too large
    __ null_check(object.result(), new CodeEmitInfo(info));
  }

  LIR_Address* address;
  if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

  if (is_volatile && os::is_MP()) {
    __ membar_release();
  }

  if (is_oop) {
    // Do the pre-write barrier, if any.
    pre_barrier(LIR_OprFact::address(address),
                needs_patching,
                (info ? new CodeEmitInfo(info) : NULL));
  }

  if (is_volatile && !needs_patching) {
    volatile_field_store(value.result(), address, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ store(value.result(), address, info, patch_code);
  }

  if (is_oop) {
    // Store to object so mark the card of the header
    post_barrier(object.result(), value.result());
  }

  if (is_volatile && os::is_MP()) {
    __ membar();
  }
}
void LIRGenerator::do_LoadField(LoadField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();

  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
  }

  LIRItem object(x->obj(), this);

  object.load_item();

#ifndef PRODUCT
  if (PrintNotLoaded && needs_patching) {
    tty->print_cr("   ###class not loaded at load_%s bci %d",
                  x->is_static() ? "static" : "field", x->printable_bci());
  }
#endif

  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // emit an explicit null check because the offset is too large
    __ null_check(object.result(), new CodeEmitInfo(info));
  }

  LIR_Opr reg = rlock_result(x, field_type);
  LIR_Address* address;
  if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

  if (is_volatile && !needs_patching) {
    volatile_field_load(address, reg, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ load(address, reg, info, patch_code);
  }

  if (is_volatile && os::is_MP()) {
    __ membar_acquire();
  }
}


//------------------------java.nio.Buffer.checkIndex------------------------

// int java.nio.Buffer.checkIndex(int)
void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
  // NOTE: by the time we are in checkIndex() we are guaranteed that
  // the buffer is non-null (because checkIndex is package-private and
  // only called from within other methods in the buffer).
  assert(x->number_of_arguments() == 2, "wrong type");
  LIRItem buf  (x->argument_at(0), this);
  LIRItem index(x->argument_at(1), this);
  buf.load_item();
  index.load_item();

  LIR_Opr result = rlock_result(x);
  if (GenerateRangeChecks) {
    CodeEmitInfo* info = state_for(x);
    CodeStub* stub = new RangeCheckStub(info, index.result(), true);
    if (index.result()->is_constant()) {
      cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
      __ branch(lir_cond_belowEqual, T_INT, stub);
    } else {
      cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
                  java_nio_Buffer::limit_offset(), T_INT, info);
      __ branch(lir_cond_aboveEqual, T_INT, stub);
    }
    __ move(index.result(), result);
  } else {
    // Just load the index into the result register
    __ move(index.result(), result);
  }
}
1674 //------------------------array access--------------------------------------
1677 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
1678 LIRItem array(x->array(), this);
1679 array.load_item();
1680 LIR_Opr reg = rlock_result(x);
1682 CodeEmitInfo* info = NULL;
1683 if (x->needs_null_check()) {
1684 NullCheck* nc = x->explicit_null_check();
1685 if (nc == NULL) {
1686 info = state_for(x);
1687 } else {
1688 info = state_for(nc);
1689 }
1690 }
1691 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
1692 }
1695 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
1696 bool use_length = x->length() != NULL;
1697 LIRItem array(x->array(), this);
1698 LIRItem index(x->index(), this);
1699 LIRItem length(this);
1700 bool needs_range_check = true;
1702 if (use_length) {
1703 needs_range_check = x->compute_needs_range_check();
1704 if (needs_range_check) {
1705 length.set_instruction(x->length());
1706 length.load_item();
1707 }
1708 }
1710 array.load_item();
1711 if (index.is_constant() && can_inline_as_constant(x->index())) {
1712 // let it be a constant
1713 index.dont_load_item();
1714 } else {
1715 index.load_item();
1716 }
1718 CodeEmitInfo* range_check_info = state_for(x);
1719 CodeEmitInfo* null_check_info = NULL;
1720 if (x->needs_null_check()) {
1721 NullCheck* nc = x->explicit_null_check();
1722 if (nc != NULL) {
1723 null_check_info = state_for(nc);
1724 } else {
1725 null_check_info = range_check_info;
1726 }
1727 }
1729 // emit array address setup early so it schedules better
1730 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
1732 if (GenerateRangeChecks && needs_range_check) {
1733 if (use_length) {
1734 // TODO: use a (modified) version of array_range_check that does not require a
1735 // constant length to be loaded to a register
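// i.e. branch to the range-check stub when length <= index (unsigned);
// the unsigned compare also catches a negative index.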
1736 __ cmp(lir_cond_belowEqual, length.result(), index.result());
1737 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1738 } else {
1739 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1740 // The range check performs the null check, so clear it out for the load
1741 null_check_info = NULL;
1742 }
1743 }
1745 __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
1746 }
1749 void LIRGenerator::do_NullCheck(NullCheck* x) {
1750 if (x->can_trap()) {
1751 LIRItem value(x->obj(), this);
1752 value.load_item();
1753 CodeEmitInfo* info = state_for(x);
1754 __ null_check(value.result(), info);
1755 }
1756 }
1759 void LIRGenerator::do_Throw(Throw* x) {
1760 LIRItem exception(x->exception(), this);
1761 exception.load_item();
1762 set_no_result(x);
1763 LIR_Opr exception_opr = exception.result();
1764 CodeEmitInfo* info = state_for(x, x->state());
1766 #ifndef PRODUCT
1767 if (PrintC1Statistics) {
1768 increment_counter(Runtime1::throw_count_address(), T_INT);
1769 }
1770 #endif
1772 // check if the instruction has an xhandler in any of the nested scopes
1773 bool unwind = false;
1774 if (info->exception_handlers()->length() == 0) {
1775 // this throw is not inside an xhandler
1776 unwind = true;
1777 } else {
1778 // get some idea of the throw type
1779 bool type_is_exact = true;
1780 ciType* throw_type = x->exception()->exact_type();
1781 if (throw_type == NULL) {
1782 type_is_exact = false;
1783 throw_type = x->exception()->declared_type();
1784 }
1785 if (throw_type != NULL && throw_type->is_instance_klass()) {
1786 ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
1787 unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
1788 }
1789 }
1791 // do null check before moving exception oop into fixed register
1792 // to avoid a fixed interval with an oop during the null check.
1793 // Use a copy of the CodeEmitInfo because debug information is
1794 // different for null_check and throw.
1795 if (GenerateCompilerNullChecks &&
1796 (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
1797 // if the exception object wasn't created using new then it might be null.
1798 __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
1799 }
1801 if (compilation()->env()->jvmti_can_post_on_exceptions()) {
1802 // we need to go through the exception lookup path to get JVMTI
1803 // notification done
1804 unwind = false;
1805 }
1807 // move exception oop into fixed register
1808 __ move(exception_opr, exceptionOopOpr());
1810 if (unwind) {
1811 __ unwind_exception(exceptionOopOpr());
1812 } else {
1813 __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
1814 }
1815 }
1818 void LIRGenerator::do_RoundFP(RoundFP* x) {
1819 LIRItem input(x->input(), this);
1820 input.load_item();
1821 LIR_Opr input_opr = input.result();
1822 assert(input_opr->is_register(), "why round if value is not in a register?");
1823 assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
1824 if (input_opr->is_single_fpu()) {
1825 set_result(x, round_item(input_opr)); // This code path not currently taken
1826 } else {
1827 LIR_Opr result = new_register(T_DOUBLE);
1828 set_vreg_flag(result, must_start_in_memory);
1829 __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
1830 set_result(x, result);
1831 }
1832 }
1834 void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
1835 LIRItem base(x->base(), this);
1836 LIRItem idx(this);
1838 base.load_item();
1839 if (x->has_index()) {
1840 idx.set_instruction(x->index());
1841 idx.load_nonconstant();
1842 }
1844 LIR_Opr reg = rlock_result(x, x->basic_type());
1846 int log2_scale = 0;
1847 if (x->has_index()) {
1848 assert(x->index()->type()->tag() == intTag, "should not find non-int index");
1849 log2_scale = x->log2_scale();
1850 }
1852 assert(!x->has_index() || idx.value() == x->index(), "should match");
1854 LIR_Opr base_op = base.result();
1855 #ifndef _LP64
1856 if (x->base()->type()->tag() == longTag) {
1857 base_op = new_register(T_INT);
1858 __ convert(Bytecodes::_l2i, base.result(), base_op);
1859 } else {
1860 assert(x->base()->type()->tag() == intTag, "must be");
1861 }
1862 #endif
1864 BasicType dst_type = x->basic_type();
1865 LIR_Opr index_op = idx.result();
1867 LIR_Address* addr;
1868 if (index_op->is_constant()) {
1869 assert(log2_scale == 0, "must not have a scale");
1870 addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
1871 } else {
1872 #ifdef X86
1873 #ifdef _LP64
1874 if (!index_op->is_illegal() && index_op->type() == T_INT) {
1875 LIR_Opr tmp = new_pointer_register();
1876 __ convert(Bytecodes::_i2l, index_op, tmp);
1877 index_op = tmp;
1878 }
1879 #endif
1880 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
1881 #elif defined(ARM)
1882 addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
1883 #else
1884 if (index_op->is_illegal() || log2_scale == 0) {
1885 #ifdef _LP64
1886 if (!index_op->is_illegal() && index_op->type() == T_INT) {
1887 LIR_Opr tmp = new_pointer_register();
1888 __ convert(Bytecodes::_i2l, index_op, tmp);
1889 index_op = tmp;
1890 }
1891 #endif
1892 addr = new LIR_Address(base_op, index_op, dst_type);
1893 } else {
1894 LIR_Opr tmp = new_pointer_register();
1895 __ shift_left(index_op, log2_scale, tmp);
1896 addr = new LIR_Address(base_op, tmp, dst_type);
1897 }
1898 #endif
1899 }
1901 if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
1902 __ unaligned_move(addr, reg);
1903 } else {
1904 if (dst_type == T_OBJECT && x->is_wide()) {
1905 __ move_wide(addr, reg);
1906 } else {
1907 __ move(addr, reg);
1908 }
1909 }
1910 }
1913 void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
1914 int log2_scale = 0;
1915 BasicType type = x->basic_type();
1917 if (x->has_index()) {
1918 assert(x->index()->type()->tag() == intTag, "should not find non-int index");
1919 log2_scale = x->log2_scale();
1920 }
1922 LIRItem base(x->base(), this);
1923 LIRItem value(x->value(), this);
1924 LIRItem idx(this);
1926 base.load_item();
1927 if (x->has_index()) {
1928 idx.set_instruction(x->index());
1929 idx.load_item();
1930 }
1932 if (type == T_BYTE || type == T_BOOLEAN) {
1933 value.load_byte_item();
1934 } else {
1935 value.load_item();
1936 }
1938 set_no_result(x);
1940 LIR_Opr base_op = base.result();
1941 #ifndef _LP64
1942 if (x->base()->type()->tag() == longTag) {
1943 base_op = new_register(T_INT);
1944 __ convert(Bytecodes::_l2i, base.result(), base_op);
1945 } else {
1946 assert(x->base()->type()->tag() == intTag, "must be");
1947 }
1948 #endif
1950 LIR_Opr index_op = idx.result();
1951 if (log2_scale != 0) {
1952 // temporary fix (platform dependent code without shift on Intel would be better)
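// Illustrative: for 8-byte elements log2_scale is 3, so the shift below
// turns the element index into a byte offset (index << 3).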
1953 index_op = new_pointer_register();
1954 #ifdef _LP64
1955 if (idx.result()->type() == T_INT) {
1956 __ convert(Bytecodes::_i2l, idx.result(), index_op);
1957 } else {
1958 #endif
1959 // TODO: ARM also allows embedded shift in the address
1960 __ move(idx.result(), index_op);
1961 #ifdef _LP64
1962 }
1963 #endif
1964 __ shift_left(index_op, log2_scale, index_op);
1965 }
1966 #ifdef _LP64
1967 else if (!index_op->is_illegal() && index_op->type() == T_INT) {
1968 LIR_Opr tmp = new_pointer_register();
1969 __ convert(Bytecodes::_i2l, index_op, tmp);
1970 index_op = tmp;
1971 }
1972 #endif
1974 LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
1975 __ move(value.result(), addr);
1976 }
1979 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
1980 BasicType type = x->basic_type();
1981 LIRItem src(x->object(), this);
1982 LIRItem off(x->offset(), this);
1984 off.load_item();
1985 src.load_item();
1987 LIR_Opr reg = rlock_result(x, x->basic_type());
1989 get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
1990 if (x->is_volatile() && os::is_MP()) __ membar_acquire();
1991 }
1994 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
1995 BasicType type = x->basic_type();
1996 LIRItem src(x->object(), this);
1997 LIRItem off(x->offset(), this);
1998 LIRItem data(x->value(), this);
2000 src.load_item();
2001 if (type == T_BOOLEAN || type == T_BYTE) {
2002 data.load_byte_item();
2003 } else {
2004 data.load_item();
2005 }
2006 off.load_item();
2008 set_no_result(x);
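// Note: for a volatile store, the release barrier keeps earlier accesses
// from being reordered after the store, and the trailing full membar keeps
// the store from being reordered with a subsequent volatile load.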
2010 if (x->is_volatile() && os::is_MP()) __ membar_release();
2011 put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
2012 if (x->is_volatile() && os::is_MP()) __ membar();
2013 }
2016 void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
2017 LIRItem src(x->object(), this);
2018 LIRItem off(x->offset(), this);
2020 src.load_item();
2021 if (off.is_constant() && can_inline_as_constant(x->offset())) {
2022 // let it be a constant
2023 off.dont_load_item();
2024 } else {
2025 off.load_item();
2026 }
2028 set_no_result(x);
2030 LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
2031 __ prefetch(addr, is_store);
2032 }
2035 void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
2036 do_UnsafePrefetch(x, false);
2037 }
2040 void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
2041 do_UnsafePrefetch(x, true);
2042 }
2045 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2046 int lng = x->length();
2048 for (int i = 0; i < lng; i++) {
2049 SwitchRange* one_range = x->at(i);
2050 int low_key = one_range->low_key();
2051 int high_key = one_range->high_key();
2052 BlockBegin* dest = one_range->sux();
2053 if (low_key == high_key) {
2054 __ cmp(lir_cond_equal, value, low_key);
2055 __ branch(lir_cond_equal, T_INT, dest);
2056 } else if (high_key - low_key == 1) {
2057 __ cmp(lir_cond_equal, value, low_key);
2058 __ branch(lir_cond_equal, T_INT, dest);
2059 __ cmp(lir_cond_equal, value, high_key);
2060 __ branch(lir_cond_equal, T_INT, dest);
2061 } else {
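// Illustrative: a multi-key range [low_key, high_key] -> dest is emitted as
//   if (value < low_key)   goto L;     // skip this range
//   if (value <= high_key) goto dest;  // low_key <= value <= high_key
//   L:                                 // fall through to the next range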
2062 LabelObj* L = new LabelObj();
2063 __ cmp(lir_cond_less, value, low_key);
2064 __ branch(lir_cond_less, L->label());
2065 __ cmp(lir_cond_lessEqual, value, high_key);
2066 __ branch(lir_cond_lessEqual, T_INT, dest);
2067 __ branch_destination(L->label());
2068 }
2069 }
2070 __ jump(default_sux);
2071 }
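// Build disjoint key ranges out of consecutive tableswitch successors.
// Illustrative (hypothetical successors): lo_key = 0 with successors
// {B1, B1, default, B2} yields [0,1] -> B1 and [3,3] -> B2; the run that
// dispatches to the default is dropped.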
2074 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2075 SwitchRangeList* res = new SwitchRangeList();
2076 int len = x->length();
2077 if (len > 0) {
2078 BlockBegin* sux = x->sux_at(0);
2079 int key = x->lo_key();
2080 BlockBegin* default_sux = x->default_sux();
2081 SwitchRange* range = new SwitchRange(key, sux);
2082 for (int i = 0; i < len; i++, key++) {
2083 BlockBegin* new_sux = x->sux_at(i);
2084 if (sux == new_sux) {
2085 // still in same range
2086 range->set_high_key(key);
2087 } else {
2088 // skip tests which explicitly dispatch to the default
2089 if (sux != default_sux) {
2090 res->append(range);
2091 }
2092 range = new SwitchRange(key, new_sux);
2093 }
2094 sux = new_sux;
2095 }
2096 if (res->length() == 0 || res->last() != range) res->append(range);
2097 }
2098 return res;
2099 }
2102 // we expect the keys to be sorted by increasing value
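// Illustrative (hypothetical keys): {1 -> B1, 2 -> B1, 10 -> B2} collapses
// into [1,2] -> B1 and [10,10] -> B2, since adjacent keys sharing a
// successor are merged and ranges targeting the default are dropped.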
2103 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2104 SwitchRangeList* res = new SwitchRangeList();
2105 int len = x->length();
2106 if (len > 0) {
2107 BlockBegin* default_sux = x->default_sux();
2108 int key = x->key_at(0);
2109 BlockBegin* sux = x->sux_at(0);
2110 SwitchRange* range = new SwitchRange(key, sux);
2111 for (int i = 1; i < len; i++) {
2112 int new_key = x->key_at(i);
2113 BlockBegin* new_sux = x->sux_at(i);
2114 if (key+1 == new_key && sux == new_sux) {
2115 // still in same range
2116 range->set_high_key(new_key);
2117 } else {
2118 // skip tests which explicitly dispatch to the default
2119 if (range->sux() != default_sux) {
2120 res->append(range);
2121 }
2122 range = new SwitchRange(new_key, new_sux);
2123 }
2124 key = new_key;
2125 sux = new_sux;
2126 }
2127 if (res->length() == 0 || res->last() != range) res->append(range);
2128 }
2129 return res;
2130 }
2133 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2134 LIRItem tag(x->tag(), this);
2135 tag.load_item();
2136 set_no_result(x);
2138 if (x->is_safepoint()) {
2139 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2140 }
2142 // move values into phi locations
2143 move_to_phi(x->state());
2145 int lo_key = x->lo_key();
2146 int hi_key = x->hi_key();
2147 int len = x->length();
2148 LIR_Opr value = tag.result();
2149 if (UseTableRanges) {
2150 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2151 } else {
2152 for (int i = 0; i < len; i++) {
2153 __ cmp(lir_cond_equal, value, i + lo_key);
2154 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2155 }
2156 __ jump(x->default_sux());
2157 }
2158 }
2161 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2162 LIRItem tag(x->tag(), this);
2163 tag.load_item();
2164 set_no_result(x);
2166 if (x->is_safepoint()) {
2167 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2168 }
2170 // move values into phi locations
2171 move_to_phi(x->state());
2173 LIR_Opr value = tag.result();
2174 if (UseTableRanges) {
2175 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2176 } else {
2177 int len = x->length();
2178 for (int i = 0; i < len; i++) {
2179 __ cmp(lir_cond_equal, value, x->key_at(i));
2180 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2181 }
2182 __ jump(x->default_sux());
2183 }
2184 }
2187 void LIRGenerator::do_Goto(Goto* x) {
2188 set_no_result(x);
2190 if (block()->next()->as_OsrEntry()) {
2191 // need to free up storage used for OSR entry point
2192 LIR_Opr osrBuffer = block()->next()->operand();
2193 BasicTypeList signature;
2194 signature.append(T_INT);
2195 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2196 __ move(osrBuffer, cc->args()->at(0));
2197 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2198 getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2199 }
2201 if (x->is_safepoint()) {
2202 ValueStack* state = x->state_before() ? x->state_before() : x->state();
2204 // increment backedge counter if needed
2205 CodeEmitInfo* info = state_for(x, state);
2206 increment_backedge_counter(info, info->stack()->bci());
2207 CodeEmitInfo* safepoint_info = state_for(x, state);
2208 __ safepoint(safepoint_poll_register(), safepoint_info);
2209 }
2211 // Gotos can be folded Ifs; handle this case.
2212 if (x->should_profile()) {
2213 ciMethod* method = x->profiled_method();
2214 assert(method != NULL, "method should be set if branch is profiled");
2215 ciMethodData* md = method->method_data_or_null();
2216 assert(md != NULL, "Sanity");
2217 ciProfileData* data = md->bci_to_data(x->profiled_bci());
2218 assert(data != NULL, "must have profiling data");
2219 int offset;
2220 if (x->direction() == Goto::taken) {
2221 assert(data->is_BranchData(), "need BranchData for two-way branches");
2222 offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2223 } else if (x->direction() == Goto::not_taken) {
2224 assert(data->is_BranchData(), "need BranchData for two-way branches");
2225 offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2226 } else {
2227 assert(data->is_JumpData(), "need JumpData for branches");
2228 offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2229 }
2230 LIR_Opr md_reg = new_register(T_OBJECT);
2231 __ oop2reg(md->constant_encoding(), md_reg);
2233 increment_counter(new LIR_Address(md_reg, offset,
2234 NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2235 }
2237 // emit phi-instruction move after safepoint since this simplifies
2238 // describing the state at the safepoint.
2239 move_to_phi(x->state());
2241 __ jump(x->default_sux());
2242 }
2245 void LIRGenerator::do_Base(Base* x) {
2246 __ std_entry(LIR_OprFact::illegalOpr);
2247 // Emit moves from physical registers / stack slots to virtual registers
2248 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2249 IRScope* irScope = compilation()->hir()->top_scope();
2250 int java_index = 0;
2251 for (int i = 0; i < args->length(); i++) {
2252 LIR_Opr src = args->at(i);
2253 assert(!src->is_illegal(), "check");
2254 BasicType t = src->type();
2256 // Types which are smaller than int are passed as int, so
2257 // correct the type that is passed.
2258 switch (t) {
2259 case T_BYTE:
2260 case T_BOOLEAN:
2261 case T_SHORT:
2262 case T_CHAR:
2263 t = T_INT;
2264 break;
2265 }
2267 LIR_Opr dest = new_register(t);
2268 __ move(src, dest);
2270 // Assign new location to Local instruction for this local
2271 Local* local = x->state()->local_at(java_index)->as_Local();
2272 assert(local != NULL, "Locals for incoming arguments must have been created");
2273 #ifndef __SOFTFP__
2274 // The java calling convention passes double as long and float as int.
2275 assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2276 #endif // __SOFTFP__
2277 local->set_operand(dest);
2278 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
2279 java_index += type2size[t];
2280 }
2282 if (compilation()->env()->dtrace_method_probes()) {
2283 BasicTypeList signature;
2284 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
2285 signature.append(T_OBJECT); // methodOop
2286 LIR_OprList* args = new LIR_OprList();
2287 args->append(getThreadPointer());
2288 LIR_Opr meth = new_register(T_OBJECT);
2289 __ oop2reg(method()->constant_encoding(), meth);
2290 args->append(meth);
2291 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2292 }
2294 if (method()->is_synchronized()) {
2295 LIR_Opr obj;
2296 if (method()->is_static()) {
2297 obj = new_register(T_OBJECT);
2298 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2299 } else {
2300 Local* receiver = x->state()->local_at(0)->as_Local();
2301 assert(receiver != NULL, "must already exist");
2302 obj = receiver->operand();
2303 }
2304 assert(obj->is_valid(), "must be valid");
2306 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2307 LIR_Opr lock = new_register(T_INT);
2308 __ load_stack_address_monitor(0, lock);
2310 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
2311 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2313 // receiver is guaranteed non-NULL, so we don't need a CodeEmitInfo
2314 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2315 }
2316 }
2318 // increment invocation counters if needed
2319 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2320 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
2321 increment_invocation_counter(info);
2322 }
2324 // all blocks with a successor must end with an unconditional jump
2325 // to the successor even if they are consecutive
2326 __ jump(x->default_sux());
2327 }
2330 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2331 // construct our frame and model the production of the incoming pointer
2332 // to the OSR buffer.
2333 __ osr_entry(LIR_Assembler::osrBufferPointer());
2334 LIR_Opr result = rlock_result(x);
2335 __ move(LIR_Assembler::osrBufferPointer(), result);
2336 }
2339 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2340 int i = (x->has_receiver() || x->is_invokedynamic()) ? 1 : 0;
2341 for (; i < args->length(); i++) {
2342 LIRItem* param = args->at(i);
2343 LIR_Opr loc = arg_list->at(i);
2344 if (loc->is_register()) {
2345 param->load_item_force(loc);
2346 } else {
2347 LIR_Address* addr = loc->as_address_ptr();
2348 param->load_for_store(addr->type());
2349 if (addr->type() == T_OBJECT) {
2350 __ move_wide(param->result(), addr);
2351 } else if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2353 __ unaligned_move(param->result(), addr);
2354 } else {
2355 __ move(param->result(), addr);
2356 }
2357 }
2358 }
2360 if (x->has_receiver()) {
2361 LIRItem* receiver = args->at(0);
2362 LIR_Opr loc = arg_list->at(0);
2363 if (loc->is_register()) {
2364 receiver->load_item_force(loc);
2365 } else {
2366 assert(loc->is_address(), "just checking");
2367 receiver->load_for_store(T_OBJECT);
2368 __ move_wide(receiver->result(), loc->as_address_ptr());
2369 }
2370 }
2371 }
2374 // Visits all arguments, returns appropriate items without loading them
2375 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2376 LIRItemList* argument_items = new LIRItemList();
2377 if (x->has_receiver()) {
2378 LIRItem* receiver = new LIRItem(x->receiver(), this);
2379 argument_items->append(receiver);
2380 }
2381 if (x->is_invokedynamic()) {
2382 // Insert a dummy for the synthetic MethodHandle argument.
2383 argument_items->append(NULL);
2384 }
2385 int idx = x->has_receiver() ? 1 : 0;
2386 for (int i = 0; i < x->number_of_arguments(); i++) {
2387 LIRItem* param = new LIRItem(x->argument_at(i), this);
2388 argument_items->append(param);
2389 idx += (param->type()->is_double_word() ? 2 : 1);
2390 }
2391 return argument_items;
2392 }
2395 // The invoke with receiver has the following phases:
2396 // a) traverse and load/lock receiver;
2397 // b) traverse all arguments -> item-array (invoke_visit_arguments)
2398 // c) push receiver on stack
2399 // d) load each of the items and push on stack
2400 // e) unlock receiver
2401 // f) move receiver into receiver-register %o0
2402 // g) lock result registers and emit call operation
2403 //
2404 // Before issuing a call, we must spill-save all values on stack
2405 // that are in caller-save registers. "spill-save" moves those registers
2406 // either into a free callee-save register or spills them if no free
2407 // callee-save register is available.
2408 //
2409 // The problem is where to invoke spill-save.
2410 // - if invoked between e) and f), we may lock a callee-save
2411 // register in "spill-save" that destroys the receiver register
2412 // before f) is executed
2413 // - if we rearrange f) to be earlier, by loading %o0, it
2414 // may destroy a value on the stack that is currently in %o0
2415 // and is waiting to be spilled
2416 // - if we keep the receiver locked while doing spill-save,
2417 // we cannot spill it as it is spill-locked
2418 //
2419 void LIRGenerator::do_Invoke(Invoke* x) {
2420 CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2422 LIR_OprList* arg_list = cc->args();
2423 LIRItemList* args = invoke_visit_arguments(x);
2424 LIR_Opr receiver = LIR_OprFact::illegalOpr;
2426 // setup result register
2427 LIR_Opr result_register = LIR_OprFact::illegalOpr;
2428 if (x->type() != voidType) {
2429 result_register = result_register_for(x->type());
2430 }
2432 CodeEmitInfo* info = state_for(x, x->state());
2434 // invokedynamics can deoptimize.
2435 CodeEmitInfo* deopt_info = x->is_invokedynamic() ? state_for(x, x->state_before()) : NULL;
2437 invoke_load_arguments(x, args, arg_list);
2439 if (x->has_receiver()) {
2440 args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2441 receiver = args->at(0)->result();
2442 }
2444 // emit invoke code
2445 bool optimized = x->target_is_loaded() && x->target_is_final();
2446 assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2448 // JSR 292
2449 // Preserve the SP over MethodHandle call sites.
2450 ciMethod* target = x->target();
2451 if (target->is_method_handle_invoke()) {
2452 info->set_is_method_handle_invoke(true);
2453 __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
2454 }
2456 switch (x->code()) {
2457 case Bytecodes::_invokestatic:
2458 __ call_static(target, result_register,
2459 SharedRuntime::get_resolve_static_call_stub(),
2460 arg_list, info);
2461 break;
2462 case Bytecodes::_invokespecial:
2463 case Bytecodes::_invokevirtual:
2464 case Bytecodes::_invokeinterface:
2465 // for a final target we still produce an inline cache, in order
2466 // to be able to call it in mixed mode
2467 if (x->code() == Bytecodes::_invokespecial || optimized) {
2468 __ call_opt_virtual(target, receiver, result_register,
2469 SharedRuntime::get_resolve_opt_virtual_call_stub(),
2470 arg_list, info);
2471 } else if (x->vtable_index() < 0) {
2472 __ call_icvirtual(target, receiver, result_register,
2473 SharedRuntime::get_resolve_virtual_call_stub(),
2474 arg_list, info);
2475 } else {
2476 int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2477 int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2478 __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2479 }
2480 break;
2481 case Bytecodes::_invokedynamic: {
2482 ciBytecodeStream bcs(x->scope()->method());
2483 bcs.force_bci(x->state()->bci());
2484 assert(bcs.cur_bc() == Bytecodes::_invokedynamic, "wrong stream");
2485 ciCPCache* cpcache = bcs.get_cpcache();
2487 // Get CallSite offset from constant pool cache pointer.
2488 int index = bcs.get_method_index();
2489 size_t call_site_offset = cpcache->get_f1_offset(index);
2491 // If this invokedynamic call site hasn't been executed yet in
2492 // the interpreter, the CallSite object in the constant pool
2493 // cache is still null and we need to deoptimize.
2494 if (cpcache->is_f1_null_at(index)) {
2495 // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
2496 // clone all handlers. This is handled transparently in other
2497 // places by the CodeEmitInfo cloning logic but is handled
2498 // specially here because a stub isn't being used.
2499 x->set_exception_handlers(new XHandlers(x->exception_handlers()));
2501 DeoptimizeStub* deopt_stub = new DeoptimizeStub(deopt_info);
2502 __ jump(deopt_stub);
2503 }
2505 // Use the receiver register for the synthetic MethodHandle
2506 // argument.
2507 receiver = LIR_Assembler::receiverOpr();
2508 LIR_Opr tmp = new_register(objectType);
2510 // Load CallSite object from constant pool cache.
2511 __ oop2reg(cpcache->constant_encoding(), tmp);
2512 __ load(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);
2514 // Load target MethodHandle from CallSite object.
2515 __ load(new LIR_Address(tmp, java_lang_invoke_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
2517 __ call_dynamic(target, receiver, result_register,
2518 SharedRuntime::get_resolve_opt_virtual_call_stub(),
2519 arg_list, info);
2520 break;
2521 }
2522 default:
2523 ShouldNotReachHere();
2524 break;
2525 }
2527 // JSR 292
2528 // Restore the SP after MethodHandle call sites.
2529 if (target->is_method_handle_invoke()) {
2530 __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
2531 }
2533 if (x->type()->is_float() || x->type()->is_double()) {
2534 // Force rounding of results from non-strictfp when in strictfp
2535 // scope (or when we don't know the strictness of the callee, to
2536 // be safe).
2537 if (method()->is_strict()) {
2538 if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2539 result_register = round_item(result_register);
2540 }
2541 }
2542 }
2544 if (result_register->is_valid()) {
2545 LIR_Opr result = rlock_result(x);
2546 __ move(result_register, result);
2547 }
2548 }
2551 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
2552 assert(x->number_of_arguments() == 1, "wrong type");
2553 LIRItem value (x->argument_at(0), this);
2554 LIR_Opr reg = rlock_result(x);
2555 value.load_item();
2556 LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
2557 __ move(tmp, reg);
2558 }
2562 // Code for: x->x() {x->cond()} x->y() ? x->tval() : x->fval()
2563 void LIRGenerator::do_IfOp(IfOp* x) {
2564 #ifdef ASSERT
2565 {
2566 ValueTag xtag = x->x()->type()->tag();
2567 ValueTag ttag = x->tval()->type()->tag();
2568 assert(xtag == intTag || xtag == objectTag, "cannot handle others");
2569 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
2570 assert(ttag == x->fval()->type()->tag(), "cannot handle others");
2571 }
2572 #endif
2574 LIRItem left(x->x(), this);
2575 LIRItem right(x->y(), this);
2576 left.load_item();
2577 if (can_inline_as_constant(right.value())) {
2578 right.dont_load_item();
2579 } else {
2580 right.load_item();
2581 }
2583 LIRItem t_val(x->tval(), this);
2584 LIRItem f_val(x->fval(), this);
2585 t_val.dont_load_item();
2586 f_val.dont_load_item();
2587 LIR_Opr reg = rlock_result(x);
2589 __ cmp(lir_cond(x->cond()), left.result(), right.result());
2590 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
2591 }
2594 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
2595 switch (x->id()) {
2596 case vmIntrinsics::_intBitsToFloat :
2597 case vmIntrinsics::_doubleToRawLongBits :
2598 case vmIntrinsics::_longBitsToDouble :
2599 case vmIntrinsics::_floatToRawIntBits : {
2600 do_FPIntrinsics(x);
2601 break;
2602 }
2604 case vmIntrinsics::_currentTimeMillis: {
2605 assert(x->number_of_arguments() == 0, "wrong type");
2606 LIR_Opr reg = result_register_for(x->type());
2607 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeMillis), getThreadTemp(),
2608 reg, new LIR_OprList());
2609 LIR_Opr result = rlock_result(x);
2610 __ move(reg, result);
2611 break;
2612 }
2614 case vmIntrinsics::_nanoTime: {
2615 assert(x->number_of_arguments() == 0, "wrong type");
2616 LIR_Opr reg = result_register_for(x->type());
2617 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeNanos), getThreadTemp(),
2618 reg, new LIR_OprList());
2619 LIR_Opr result = rlock_result(x);
2620 __ move(reg, result);
2621 break;
2622 }
2624 case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break;
2625 case vmIntrinsics::_getClass: do_getClass(x); break;
2626 case vmIntrinsics::_currentThread: do_currentThread(x); break;
2628 case vmIntrinsics::_dlog: // fall through
2629 case vmIntrinsics::_dlog10: // fall through
2630 case vmIntrinsics::_dabs: // fall through
2631 case vmIntrinsics::_dsqrt: // fall through
2632 case vmIntrinsics::_dtan: // fall through
2633 case vmIntrinsics::_dsin : // fall through
2634 case vmIntrinsics::_dcos : do_MathIntrinsic(x); break;
2635 case vmIntrinsics::_arraycopy: do_ArrayCopy(x); break;
2637 // java.nio.Buffer.checkIndex
2638 case vmIntrinsics::_checkIndex: do_NIOCheckIndex(x); break;
2640 case vmIntrinsics::_compareAndSwapObject:
2641 do_CompareAndSwap(x, objectType);
2642 break;
2643 case vmIntrinsics::_compareAndSwapInt:
2644 do_CompareAndSwap(x, intType);
2645 break;
2646 case vmIntrinsics::_compareAndSwapLong:
2647 do_CompareAndSwap(x, longType);
2648 break;
2650 // sun.misc.AtomicLongCSImpl.attemptUpdate
2651 case vmIntrinsics::_attemptUpdate:
2652 do_AttemptUpdate(x);
2653 break;
2655 default: ShouldNotReachHere(); break;
2656 }
2657 }
2659 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
2660 // Need recv in a temporary register so it interferes with the other temporaries
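// (Copying recv into a fresh virtual register makes it live across the
// profile_call, so the register allocator keeps it distinct from mdo and tmp.)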
2661 LIR_Opr recv = LIR_OprFact::illegalOpr;
2662 LIR_Opr mdo = new_register(T_OBJECT);
2663 // tmp is used to hold the counters on SPARC
2664 LIR_Opr tmp = new_pointer_register();
2665 if (x->recv() != NULL) {
2666 LIRItem value(x->recv(), this);
2667 value.load_item();
2668 recv = new_register(T_OBJECT);
2669 __ move(value.result(), recv);
2670 }
2671 __ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder());
2672 }
2674 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
2675 // We can safely ignore accessors here, since c2 will inline them anyway;
2676 // accessors are also always mature.
2677 if (!x->inlinee()->is_accessor()) {
2678 CodeEmitInfo* info = state_for(x, x->state(), true);
2679 // Increment the invocation counter, but don't notify the runtime, because we don't inline loops.
2680 increment_event_counter_impl(info, x->inlinee(), 0, InvocationEntryBci, false, false);
2681 }
2682 }
2684 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
2685 int freq_log;
2686 int level = compilation()->env()->comp_level();
2687 if (level == CompLevel_limited_profile) {
2688 freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
2689 } else if (level == CompLevel_full_profile) {
2690 freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
2691 } else {
2692 ShouldNotReachHere();
2693 }
2694 // Increment the appropriate invocation/backedge counter and notify the runtime.
2695 increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
2696 }
2698 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
2699 ciMethod *method, int frequency,
2700 int bci, bool backedge, bool notify) {
2701 assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^x - 1 or 0");
2702 int level = _compilation->env()->comp_level();
2703 assert(level > CompLevel_simple, "Shouldn't be here");
2705 int offset = -1;
2706 LIR_Opr counter_holder = new_register(T_OBJECT);
2707 LIR_Opr meth;
2708 if (level == CompLevel_limited_profile) {
2709 offset = in_bytes(backedge ? methodOopDesc::backedge_counter_offset() :
2710 methodOopDesc::invocation_counter_offset());
2711 __ oop2reg(method->constant_encoding(), counter_holder);
2712 meth = counter_holder;
2713 } else if (level == CompLevel_full_profile) {
2714 offset = in_bytes(backedge ? methodDataOopDesc::backedge_counter_offset() :
2715 methodDataOopDesc::invocation_counter_offset());
2716 ciMethodData* md = method->method_data_or_null();
2717 assert(md != NULL, "Sanity");
2718 __ oop2reg(md->constant_encoding(), counter_holder);
2719 meth = new_register(T_OBJECT);
2720 __ oop2reg(method->constant_encoding(), meth);
2721 } else {
2722 ShouldNotReachHere();
2723 }
2724 LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
2725 LIR_Opr result = new_register(T_INT);
2726 __ load(counter, result);
2727 __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
2728 __ store(result, counter);
2729 if (notify) {
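// The count field sits above count_shift, so masking the incremented value
// with (frequency << count_shift) and branching on zero takes the overflow
// path once every frequency + 1 increments, e.g. every 1024 when freq_log
// is 10.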
2730 LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
2731 __ logical_and(result, mask, result);
2732 __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
2733 // The bci for the info can point to the cmp; for ifs we want the if bci
2734 CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
2735 __ branch(lir_cond_equal, T_INT, overflow);
2736 __ branch_destination(overflow->continuation());
2737 }
2738 }
2740 void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
2741 LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
2742 BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
2744 if (x->pass_thread()) {
2745 signature->append(T_ADDRESS);
2746 args->append(getThreadPointer());
2747 }
2749 for (int i = 0; i < x->number_of_arguments(); i++) {
2750 Value a = x->argument_at(i);
2751 LIRItem* item = new LIRItem(a, this);
2752 item->load_item();
2753 args->append(item->result());
2754 signature->append(as_BasicType(a->type()));
2755 }
2757 LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
2758 if (x->type() == voidType) {
2759 set_no_result(x);
2760 } else {
2761 __ move(result, rlock_result(x));
2762 }
2763 }
2765 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
2766 LIRItemList args(1);
2767 LIRItem value(arg1, this);
2768 args.append(&value);
2769 BasicTypeList signature;
2770 signature.append(as_BasicType(arg1->type()));
2772 return call_runtime(&signature, &args, entry, result_type, info);
2773 }
2776 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
2777 LIRItemList args(2);
2778 LIRItem value1(arg1, this);
2779 LIRItem value2(arg2, this);
2780 args.append(&value1);
2781 args.append(&value2);
2782 BasicTypeList signature;
2783 signature.append(as_BasicType(arg1->type()));
2784 signature.append(as_BasicType(arg2->type()));
2786 return call_runtime(&signature, &args, entry, result_type, info);
2787 }
2790 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
2791 address entry, ValueType* result_type, CodeEmitInfo* info) {
2792 // get a result register
2793 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
2794 LIR_Opr result = LIR_OprFact::illegalOpr;
2795 if (result_type->tag() != voidTag) {
2796 result = new_register(result_type);
2797 phys_reg = result_register_for(result_type);
2798 }
2800 // move the arguments into the correct location
2801 CallingConvention* cc = frame_map()->c_calling_convention(signature);
2802 assert(cc->length() == args->length(), "argument mismatch");
2803 for (int i = 0; i < args->length(); i++) {
2804 LIR_Opr arg = args->at(i);
2805 LIR_Opr loc = cc->at(i);
2806 if (loc->is_register()) {
2807 __ move(arg, loc);
2808 } else {
2809 LIR_Address* addr = loc->as_address_ptr();
2810 // if (!can_store_as_constant(arg)) {
2811 // LIR_Opr tmp = new_register(arg->type());
2812 // __ move(arg, tmp);
2813 // arg = tmp;
2814 // }
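// Note: outgoing stack slots are only guaranteed word alignment, so 8-byte
// values are stored with unaligned_move.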
2815 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2816 __ unaligned_move(arg, addr);
2817 } else {
2818 __ move(arg, addr);
2819 }
2820 }
2821 }
2823 if (info) {
2824 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
2825 } else {
2826 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
2827 }
2828 if (result->is_valid()) {
2829 __ move(phys_reg, result);
2830 }
2831 return result;
2832 }
2835 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
2836 address entry, ValueType* result_type, CodeEmitInfo* info) {
2837 // get a result register
2838 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
2839 LIR_Opr result = LIR_OprFact::illegalOpr;
2840 if (result_type->tag() != voidTag) {
2841 result = new_register(result_type);
2842 phys_reg = result_register_for(result_type);
2843 }
2845 // move the arguments into the correct location
2846 CallingConvention* cc = frame_map()->c_calling_convention(signature);
2848 assert(cc->length() == args->length(), "argument mismatch");
2849 for (int i = 0; i < args->length(); i++) {
2850 LIRItem* arg = args->at(i);
2851 LIR_Opr loc = cc->at(i);
2852 if (loc->is_register()) {
2853 arg->load_item_force(loc);
2854 } else {
2855 LIR_Address* addr = loc->as_address_ptr();
2856 arg->load_for_store(addr->type());
2857 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2858 __ unaligned_move(arg->result(), addr);
2859 } else {
2860 __ move(arg->result(), addr);
2861 }
2862 }
2863 }
2865 if (info) {
2866 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
2867 } else {
2868 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
2869 }
2870 if (result->is_valid()) {
2871 __ move(phys_reg, result);
2872 }
2873 return result;
2874 }