Tue, 15 Sep 2009 21:53:47 -0700
6863023: need non-perm oops in code cache for JSR 292
Summary: Make a special root-list for those few nmethods which might contain non-perm oops.
Reviewed-by: twisti, kvn, never, jmasa, ysr
/*
 * Copyright 2005-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_c1_LIRGenerator.cpp.incl"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif


void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}


//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3
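//
// The resolver builds a graph whose nodes are operands and whose edges
// are the requested moves, then emits the moves in dependence order by
// a depth-first walk (see move() below).  At most one cycle is expected
// per walk (note the "only one loop valid!" assert); it is broken by
// copying one operand through the temporary _temp.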

PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}


// Traverse assignment graph in depth first order and generate moves in post order
// i.e. two assignments: b := c, a := b start with node c:
// Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
// Generates moves in this order: move b to a and move c to b
// i.e. cycle a := b, b := a start with node a
// Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
// Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}


PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}
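
// Usage sketch (illustrative; the operand names are made up -- see
// move_to_phi() below for the real call site):
//
//   PhiResolver resolver(this, max_vregs);
//   resolver.move(opr_for_a, phi_opr_for_b);   // b := a
//   resolver.move(opr_for_b, phi_opr_for_a);   // a := b, forms a cycle
//   // when resolver goes out of scope, the destructor above emits the
//   // moves in a safe order, inserting a temp copy to break the cycle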


ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      LIR_Opr tmp = _gen->force_to_spill(r, reg->type());
      __ move(tmp, reg);
    } else {
      __ move(r, reg);
    }
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}


//--------------------------------------------------------------


void LIRGenerator::init() {
  _bs = Universe::heap()->barrier_set();
}


void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current_compilation()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}


void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}


void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}


//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be a root.
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}


// This is called for each node in the tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when we encounter a root
  if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  int index;
  Value value;
  for_each_stack_value(state, index, value) {
    assert(value->subst() == value, "missed substitution");
    if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
      walk(value);
      assert(value->operand()->is_valid(), "must be evaluated now");
    }
  }
  ValueStack* s = state;
  int bci = x->bci();
  for_each_state(s) {
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter(), "only other case is MonitorEnter");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
    bci = scope->caller_bci();
  }

  return new CodeEmitInfo(x->bci(), state, ignore_xhandler ? NULL : x->exception_handlers());
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->lock_stack());
}


void LIRGenerator::jobject2reg_with_patching(LIR_Opr r, ciObject* obj, CodeEmitInfo* info) {
  if (!obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ oop2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ oop2reg(obj->constant_encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
}


void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
  __ move(index, result);
}


// increment a counter returning the incremented value
LIR_Opr LIRGenerator::increment_and_return_counter(LIR_Opr base, int offset, int increment) {
  LIR_Address* counter = new LIR_Address(base, offset, T_INT);
  LIR_Opr result = new_register(T_INT);
  __ load(counter, result);
  __ add(result, LIR_OprFact::intConst(increment), result);
  __ store(result, counter);
  return result;
}


void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          int c = right->as_jint();
          if (is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        if (is_strictfp) {
          __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div (left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
  case Bytecodes::_ishl:
  case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
  case Bytecodes::_ishr:
  case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
  case Bytecodes::_iushr:
  case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
  default: ShouldNotReachHere();
  }
}


void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, slow_path);
}


void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  jobject2reg_with_patching(klass_reg, klass, info);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;
  if (expected_type != NULL) {
    // try to skip null checks
    if (src->as_NewArray() != NULL)
      flags &= ~LIR_OpArrayCopy::src_null_check;
    if (dst->as_NewArray() != NULL)
      flags &= ~LIR_OpArrayCopy::dst_null_check;

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}
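
// Worked example (illustrative): for a copy such as
//   System.arraycopy(src, 0, dst, 0, src.length)
// where the length argument is the ArrayLength of src, the logic above
// clears the positive-value checks, length_positive_check,
// src_null_check and src_range_check; only dst_null_check,
// dst_range_check and (unless the types match exactly) type_check
// remain set.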


LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()], "size mismatch");
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}


void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data();
    if (md == NULL) {
      bailout("out of memory building methodDataOop");
      return;
    }
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    LIR_Opr md_reg = new_register(T_OBJECT);
    __ move(LIR_OprFact::oopConst(md->constant_encoding()), md_reg);
    LIR_Opr data_offset_reg = new_register(T_INT);
    __ cmove(lir_cond(cond),
             LIR_OprFact::intConst(taken_count_offset),
             LIR_OprFact::intConst(not_taken_count_offset),
             data_offset_reg);
    LIR_Opr data_reg = new_register(T_INT);
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, T_INT);
    __ move(LIR_OprFact::address(data_addr), data_reg);
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    // Use leal instead of add to avoid destroying condition codes on x86
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, LIR_OprFact::address(data_addr));
  }
}


// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated with Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double..) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At exit of a basic block, we fill the register(s) and spill slots.
// At entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.
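//
// Example (illustrative): for a join point such as
//   x = p ? a : b;
// each predecessor block ends by moving its value of x into the operand
// allocated for x's Phi (see move_to_phi() below), so the successor
// block finds the value in a single virtual register.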


// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    LIR_Opr operand = cur_val->operand();
    if (cur_val->operand()->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      // Inlining may cause the local state not to match up, so walk up
      // the caller state until we get to the same scope as the
      // successor and then start processing from there.
      while (cur_state->scope() != sux_state->scope()) {
        cur_state = cur_state->caller_state();
        assert(cur_state != NULL, "scopes don't match up");
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}


LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically. This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  if (type == T_ADDRESS) type = T_INT;
  return LIR_OprFact::virtual_register(vreg, type);
}


// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }

  set_result(x, reg);
  return reg;
}


//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  __ move(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
          exceptionOopOpr());
  __ move(LIR_OprFact::oopConst(NULL),
          new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move(LIR_OprFact::oopConst(NULL),
          new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}


//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block. After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}


void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(T_INT);    // thread
    signature.append(T_OBJECT); // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}


// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x, x->state()->copy_locks());
  }
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
  __ move(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
                          klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
}


// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ load(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}


void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}


//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}


Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}


void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}


// Block local constant handling.  This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers.  Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.
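//
// For example (illustrative), two uses of the constant 42 within one
// block share a single virtual register via load_constant() below;
// block_do_epilog() then truncates _constants and _reg_for_constants,
// so the next block rematerializes the constant instead of reusing a
// register it does not own.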

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}

// Various barriers

void LIRGenerator::pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
  switch (_bs->kind()) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_pre_barrier(addr_opr, patch, info);
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      // No pre barriers
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No pre barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_post_barrier(addr, new_val);
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr, new_val);
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No post barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC

void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
  if (G1DisablePreBarrier) return;

  // First we test whether marking is in progress.
  BasicType flag_type;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    flag_type = T_BYTE;
  }
  LIR_Opr thrd = getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(JavaThread::satb_mark_queue_offset() +
                             PtrQueue::byte_offset_of_active()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);

  LabelObj* start_store = new LabelObj();

  LIR_PatchCode pre_val_patch_code =
    patch ? lir_patch_normal : lir_patch_none;

  LIR_Opr pre_val = new_register(T_OBJECT);

  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
  if (!addr_opr->is_address()) {
    assert(addr_opr->is_register(), "must be");
    addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, 0, T_OBJECT));
  }
  CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code,
                                        info);
  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}

void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  if (G1DisablePostBarrier) return;

  // If the "new_val" is a constant NULL, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == NULL) return;

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_pointer_register();
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr xor_res = new_pointer_register();
  LIR_Opr xor_shift_res = new_pointer_register();

  if (TwoOperandLIRForm) {
    __ move(addr, xor_res);
    __ logical_xor(xor_res, new_val, xor_res);
    __ move(xor_res, xor_shift_res);
    __ unsigned_shift_right(xor_shift_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  } else {
    __ logical_xor(addr, new_val, xor_res);
    __ unsigned_shift_right(xor_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  }

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_pointer_register();
    __ leal(new_val, new_val_reg);
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));

  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}

#endif // SERIALGC
////////////////////////////////////////////////////////////////////////

void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {

  assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_register(T_OBJECT);
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  }
  if (can_inline_as_constant(card_table_base)) {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
  } else {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, load_constant(card_table_base),
                            T_BYTE));
  }
}
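
// Net effect (sketch): the code above dirties the card covering the
// updated oop, roughly
//   byte_map_base[(intptr_t)addr >> card_shift] = 0;
// with the shift done in a register and the table base folded into the
// address as a constant displacement or loaded via load_constant().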


//------------------------field access--------------------------------------

// Comment copied from templateTable_i486.cpp
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPU's in
// order.  Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt to each other.
// ALSO reads & writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
// the read float up to before the read.  It's OK for non-volatile memory refs
// that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
// that happen BEFORE the write float down to after the write.  It's OK for
// non-volatile memory refs that happen after the volatile write to float up
// before it.
//
// We only put in barriers around volatile refs (they are expensive), not
// _between_ memory refs (that would require us to track the flavor of the
// previous memory refs).  Requirements (2) and (3) require some barriers
// before volatile stores and after volatile loads.  These nearly cover
// requirement (1) but miss the volatile-store-volatile-load case.  This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.
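//
// In the LIR emitted below this reduces to the following bracketing on
// MP systems (sketch; see do_StoreField() and do_LoadField()):
//   volatile store:  membar_release(); store(value, addr); membar();
//   volatile load:   load(addr, reg); membar_acquire();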


void LIRGenerator::do_StoreField(StoreField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();
  bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);

  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x, x->lock_stack());
    } else {
      info = state_for(nc);
    }
  }

  LIRItem object(x->obj(), this);
  LIRItem value(x->value(), this);

  object.load_item();

  if (is_volatile || needs_patching) {
    // load item if field is volatile (fewer special cases for volatiles)
    // load item if field not initialized
    // load item if field not constant
    // because of code patching we cannot inline constants
    if (field_type == T_BYTE || field_type == T_BOOLEAN) {
      value.load_byte_item();
    } else {
      value.load_item();
    }
  } else {
    value.load_for_store(field_type);
  }

  set_no_result(x);

  if (PrintNotLoaded && needs_patching) {
    tty->print_cr("   ###class not loaded at store_%s bci %d",
                  x->is_static() ? "static" : "field", x->bci());
  }

  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // emit an explicit null check because the offset is too large
    __ null_check(object.result(), new CodeEmitInfo(info));
  }

  LIR_Address* address;
  if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), max_jint, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

  if (is_volatile && os::is_MP()) {
    __ membar_release();
  }

  if (is_oop) {
    // Do the pre-write barrier, if any.
    pre_barrier(LIR_OprFact::address(address),
                needs_patching,
                (info ? new CodeEmitInfo(info) : NULL));
  }

  if (is_volatile) {
    assert(!needs_patching && x->is_loaded(),
           "how do we know it's volatile if it's not loaded");
    volatile_field_store(value.result(), address, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ store(value.result(), address, info, patch_code);
  }

  if (is_oop) {
    // Store to object so mark the card of the header
    post_barrier(object.result(), value.result());
  }

  if (is_volatile && os::is_MP()) {
    __ membar();
  }
}


void LIRGenerator::do_LoadField(LoadField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();

  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x, x->lock_stack());
    } else {
      info = state_for(nc);
    }
  }

  LIRItem object(x->obj(), this);

  object.load_item();

  if (PrintNotLoaded && needs_patching) {
    tty->print_cr("   ###class not loaded at load_%s bci %d",
                  x->is_static() ? "static" : "field", x->bci());
  }

  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // emit an explicit null check because the offset is too large
    __ null_check(object.result(), new CodeEmitInfo(info));
  }

  LIR_Opr reg = rlock_result(x, field_type);
  LIR_Address* address;
  if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), max_jint, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

  if (is_volatile) {
    assert(!needs_patching && x->is_loaded(),
           "how do we know it's volatile if it's not loaded");
    volatile_field_load(address, reg, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ load(address, reg, info, patch_code);
  }

  if (is_volatile && os::is_MP()) {
    __ membar_acquire();
  }
}


//------------------------java.nio.Buffer.checkIndex------------------------

// int java.nio.Buffer.checkIndex(int)
void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
  // NOTE: by the time we are in checkIndex() we are guaranteed that
  // the buffer is non-null (because checkIndex is package-private and
  // only called from within other methods in the buffer).
  assert(x->number_of_arguments() == 2, "wrong type");
  LIRItem buf  (x->argument_at(0), this);
  LIRItem index(x->argument_at(1), this);
  buf.load_item();
  index.load_item();

  LIR_Opr result = rlock_result(x);
  if (GenerateRangeChecks) {
    CodeEmitInfo* info = state_for(x);
    CodeStub* stub = new RangeCheckStub(info, index.result(), true);
    if (index.result()->is_constant()) {
      cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
      __ branch(lir_cond_belowEqual, T_INT, stub);
    } else {
      cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
                  java_nio_Buffer::limit_offset(), T_INT, info);
      __ branch(lir_cond_aboveEqual, T_INT, stub);
    }
    __ move(index.result(), result);
  } else {
    // Just load the index into the result register
    __ move(index.result(), result);
  }
}


//------------------------array access--------------------------------------


void LIRGenerator::do_ArrayLength(ArrayLength* x) {
  LIRItem array(x->array(), this);
  array.load_item();
  LIR_Opr reg = rlock_result(x);

  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
  }
  __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
}
1662 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
1663 bool use_length = x->length() != NULL;
1664 LIRItem array(x->array(), this);
1665 LIRItem index(x->index(), this);
1666 LIRItem length(this);
1667 bool needs_range_check = true;
1669 if (use_length) {
1670 needs_range_check = x->compute_needs_range_check();
1671 if (needs_range_check) {
1672 length.set_instruction(x->length());
1673 length.load_item();
1674 }
1675 }
1677 array.load_item();
1678 if (index.is_constant() && can_inline_as_constant(x->index())) {
1679 // let it be a constant
1680 index.dont_load_item();
1681 } else {
1682 index.load_item();
1683 }
1685 CodeEmitInfo* range_check_info = state_for(x);
1686 CodeEmitInfo* null_check_info = NULL;
1687 if (x->needs_null_check()) {
1688 NullCheck* nc = x->explicit_null_check();
1689 if (nc != NULL) {
1690 null_check_info = state_for(nc);
1691 } else {
1692 null_check_info = range_check_info;
1693 }
1694 }
1696 // emit array address setup early so it schedules better
1697 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
1699 if (GenerateRangeChecks && needs_range_check) {
1700 if (use_length) {
1701 // TODO: use a (modified) version of array_range_check that does not require a
1702 // constant length to be loaded to a register
1703 __ cmp(lir_cond_belowEqual, length.result(), index.result());
1704 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1705 } else {
1706 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1707 // The range check performs the null check, so clear it out for the load
1708 null_check_info = NULL;
1709 }
1710 }
1712 __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
1713 }
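// (Note on the explicit range check in do_LoadIndexed above.) The single
// unsigned compare "length <=u index" suffices: a negative index, viewed
// as unsigned, exceeds any legal array length, so it takes the same
// branch into the RangeCheckStub as an index that is merely too large.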
1716 void LIRGenerator::do_NullCheck(NullCheck* x) {
1717 if (x->can_trap()) {
1718 LIRItem value(x->obj(), this);
1719 value.load_item();
1720 CodeEmitInfo* info = state_for(x);
1721 __ null_check(value.result(), info);
1722 }
1723 }
1726 void LIRGenerator::do_Throw(Throw* x) {
1727 LIRItem exception(x->exception(), this);
1728 exception.load_item();
1729 set_no_result(x);
1730 LIR_Opr exception_opr = exception.result();
1731 CodeEmitInfo* info = state_for(x, x->state());
1733 #ifndef PRODUCT
1734 if (PrintC1Statistics) {
1735 increment_counter(Runtime1::throw_count_address());
1736 }
1737 #endif
1739 // check if the instruction has an xhandler in any of the nested scopes
1740 bool unwind = false;
1741 if (info->exception_handlers()->length() == 0) {
1742 // this throw is not inside an xhandler
1743 unwind = true;
1744 } else {
1745 // get some idea of the throw type
1746 bool type_is_exact = true;
1747 ciType* throw_type = x->exception()->exact_type();
1748 if (throw_type == NULL) {
1749 type_is_exact = false;
1750 throw_type = x->exception()->declared_type();
1751 }
1752 if (throw_type != NULL && throw_type->is_instance_klass()) {
1753 ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
1754 unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
1755 }
1756 }
1758 // do null check before moving exception oop into fixed register
1759 // to avoid a fixed interval with an oop during the null check.
1760 // Use a copy of the CodeEmitInfo because debug information is
1761 // different for null_check and throw.
1762 if (GenerateCompilerNullChecks &&
1763 (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
1764 // if the exception object wasn't created using new then it might be null.
1765 __ null_check(exception_opr, new CodeEmitInfo(info, true));
1766 }
1768 if (compilation()->env()->jvmti_can_post_exceptions() &&
1769 !block()->is_set(BlockBegin::default_exception_handler_flag)) {
1770 // we need to go through the exception lookup path to get JVMTI
1771 // notification done
1772 unwind = false;
1773 }
1775 assert(!block()->is_set(BlockBegin::default_exception_handler_flag) || unwind,
1776 "should be no more handlers to dispatch to");
1778 if (compilation()->env()->dtrace_method_probes() &&
1779 block()->is_set(BlockBegin::default_exception_handler_flag)) {
1780 // notify that this frame is unwinding
1781 BasicTypeList signature;
1782 signature.append(T_INT); // thread
1783 signature.append(T_OBJECT); // methodOop
1784 LIR_OprList* args = new LIR_OprList();
1785 args->append(getThreadPointer());
1786 LIR_Opr meth = new_register(T_OBJECT);
1787 __ oop2reg(method()->constant_encoding(), meth);
1788 args->append(meth);
1789 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
1790 }
1792 // move exception oop into fixed register
1793 __ move(exception_opr, exceptionOopOpr());
1795 if (unwind) {
1796 __ unwind_exception(LIR_OprFact::illegalOpr, exceptionOopOpr(), info);
1797 } else {
1798 __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
1799 }
1800 }
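// (Example for the unwind decision in do_Throw above.) For
//   try { throw new Foo(); } catch (Bar b) { ... }
// with Foo and Bar unrelated, the exact throw type is known, could_catch()
// fails for every handler in scope, and the throw unwinds out of the frame
// directly instead of going through exception dispatch.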
1803 void LIRGenerator::do_RoundFP(RoundFP* x) {
1804 LIRItem input(x->input(), this);
1805 input.load_item();
1806 LIR_Opr input_opr = input.result();
1807 assert(input_opr->is_register(), "why round if value is not in a register?");
1808 assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
1809 if (input_opr->is_single_fpu()) {
1810 set_result(x, round_item(input_opr)); // This code path not currently taken
1811 } else {
1812 LIR_Opr result = new_register(T_DOUBLE);
1813 set_vreg_flag(result, must_start_in_memory);
1814 __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
1815 set_result(x, result);
1816 }
1817 }
1819 void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
1820 LIRItem base(x->base(), this);
1821 LIRItem idx(this);
1823 base.load_item();
1824 if (x->has_index()) {
1825 idx.set_instruction(x->index());
1826 idx.load_nonconstant();
1827 }
1829 LIR_Opr reg = rlock_result(x, x->basic_type());
1831 int log2_scale = 0;
1832 if (x->has_index()) {
1833 assert(x->index()->type()->tag() == intTag, "should not find non-int index");
1834 log2_scale = x->log2_scale();
1835 }
1837 assert(!x->has_index() || idx.value() == x->index(), "should match");
1839 LIR_Opr base_op = base.result();
1840 #ifndef _LP64
1841 if (x->base()->type()->tag() == longTag) {
1842 base_op = new_register(T_INT);
1843 __ convert(Bytecodes::_l2i, base.result(), base_op);
1844 } else {
1845 assert(x->base()->type()->tag() == intTag, "must be");
1846 }
1847 #endif
1849 BasicType dst_type = x->basic_type();
1850 LIR_Opr index_op = idx.result();
1852 LIR_Address* addr;
1853 if (index_op->is_constant()) {
1854 assert(log2_scale == 0, "must not have a scale");
1855 addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
1856 } else {
1857 #ifdef X86
1858 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
1859 #else
1860 if (index_op->is_illegal() || log2_scale == 0) {
1861 addr = new LIR_Address(base_op, index_op, dst_type);
1862 } else {
1863 LIR_Opr tmp = new_register(T_INT);
1864 __ shift_left(index_op, log2_scale, tmp);
1865 addr = new LIR_Address(base_op, tmp, dst_type);
1866 }
1867 #endif
1868 }
1870 if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
1871 __ unaligned_move(addr, reg);
1872 } else {
1873 __ move(addr, reg);
1874 }
1875 }
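// (Addressing sketch for do_UnsafeGetRaw above.) A raw long load at
// base + (idx << 3), i.e. log2_scale == 3, folds into a scaled address
// mode on X86; other platforms materialize the scaled index first:
//   shift_left idx, 3, tmp
//   move       [base + tmp], reg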
1878 void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
1879 int log2_scale = 0;
1880 BasicType type = x->basic_type();
1882 if (x->has_index()) {
1883 assert(x->index()->type()->tag() == intTag, "should not find non-int index");
1884 log2_scale = x->log2_scale();
1885 }
1887 LIRItem base(x->base(), this);
1888 LIRItem value(x->value(), this);
1889 LIRItem idx(this);
1891 base.load_item();
1892 if (x->has_index()) {
1893 idx.set_instruction(x->index());
1894 idx.load_item();
1895 }
1897 if (type == T_BYTE || type == T_BOOLEAN) {
1898 value.load_byte_item();
1899 } else {
1900 value.load_item();
1901 }
1903 set_no_result(x);
1905 LIR_Opr base_op = base.result();
1906 #ifndef _LP64
1907 if (x->base()->type()->tag() == longTag) {
1908 base_op = new_register(T_INT);
1909 __ convert(Bytecodes::_l2i, base.result(), base_op);
1910 } else {
1911 assert(x->base()->type()->tag() == intTag, "must be");
1912 }
1913 #endif
1915 LIR_Opr index_op = idx.result();
1916 if (log2_scale != 0) {
1917 // temporary fix (platform dependent code without shift on Intel would be better)
1918 index_op = new_register(T_INT);
1919 __ move(idx.result(), index_op);
1920 __ shift_left(index_op, log2_scale, index_op);
1921 }
1923 LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
1924 __ move(value.result(), addr);
1925 }
1928 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
1929 BasicType type = x->basic_type();
1930 LIRItem src(x->object(), this);
1931 LIRItem off(x->offset(), this);
1933 off.load_item();
1934 src.load_item();
1936 LIR_Opr reg = rlock_result(x, x->basic_type());
1938 if (x->is_volatile() && os::is_MP()) __ membar_acquire();
1939 get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
1940 if (x->is_volatile() && os::is_MP()) __ membar();
1941 }
1944 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
1945 BasicType type = x->basic_type();
1946 LIRItem src(x->object(), this);
1947 LIRItem off(x->offset(), this);
1948 LIRItem data(x->value(), this);
1950 src.load_item();
1951 if (type == T_BOOLEAN || type == T_BYTE) {
1952 data.load_byte_item();
1953 } else {
1954 data.load_item();
1955 }
1956 off.load_item();
1958 set_no_result(x);
1960 if (x->is_volatile() && os::is_MP()) __ membar_release();
1961 put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
1962 }
1965 void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
1966 LIRItem src(x->object(), this);
1967 LIRItem off(x->offset(), this);
1969 src.load_item();
1970 if (off.is_constant() && can_inline_as_constant(x->offset())) {
1971 // let it be a constant
1972 off.dont_load_item();
1973 } else {
1974 off.load_item();
1975 }
1977 set_no_result(x);
1979 LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
1980 __ prefetch(addr, is_store);
1981 }
1984 void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
1985 do_UnsafePrefetch(x, false);
1986 }
1989 void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
1990 do_UnsafePrefetch(x, true);
1991 }
1994 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
1995 int lng = x->length();
1997 for (int i = 0; i < lng; i++) {
1998 SwitchRange* one_range = x->at(i);
1999 int low_key = one_range->low_key();
2000 int high_key = one_range->high_key();
2001 BlockBegin* dest = one_range->sux();
2002 if (low_key == high_key) {
2003 __ cmp(lir_cond_equal, value, low_key);
2004 __ branch(lir_cond_equal, T_INT, dest);
2005 } else if (high_key - low_key == 1) {
2006 __ cmp(lir_cond_equal, value, low_key);
2007 __ branch(lir_cond_equal, T_INT, dest);
2008 __ cmp(lir_cond_equal, value, high_key);
2009 __ branch(lir_cond_equal, T_INT, dest);
2010 } else {
2011 LabelObj* L = new LabelObj();
2012 __ cmp(lir_cond_less, value, low_key);
2013 __ branch(lir_cond_less, L->label());
2014 __ cmp(lir_cond_lessEqual, value, high_key);
2015 __ branch(lir_cond_lessEqual, T_INT, dest);
2016 __ branch_destination(L->label());
2017 }
2018 }
2019 __ jump(default_sux);
2020 }
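// (Sketch of the code emitted by do_SwitchRanges above, not verbatim.)
// A range [3..7] -> B produces
//   cmp    less, value, 3
//   branch less -> L            // below the range, try the next one
//   cmp    lessEqual, value, 7
//   branch lessEqual -> B       // here 3 <= value <= 7
//   L:
// while one- and two-key ranges collapse to plain equality tests.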
2023 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2024 SwitchRangeList* res = new SwitchRangeList();
2025 int len = x->length();
2026 if (len > 0) {
2027 BlockBegin* sux = x->sux_at(0);
2028 int key = x->lo_key();
2029 BlockBegin* default_sux = x->default_sux();
2030 SwitchRange* range = new SwitchRange(key, sux);
2031 for (int i = 0; i < len; i++, key++) {
2032 BlockBegin* new_sux = x->sux_at(i);
2033 if (sux == new_sux) {
2034 // still in same range
2035 range->set_high_key(key);
2036 } else {
2037 // skip tests which explicitly dispatch to the default
2038 if (sux != default_sux) {
2039 res->append(range);
2040 }
2041 range = new SwitchRange(key, new_sux);
2042 }
2043 sux = new_sux;
2044 }
2045 if (res->length() == 0 || res->last() != range) res->append(range);
2046 }
2047 return res;
2048 }
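// (Example for create_lookup_ranges(TableSwitch*) above.) With lo_key == 0
// and successors {B1, B1, B2, default, B2} the loop produces the ranges
// [0..1] -> B1, [2..2] -> B2 and [4..4] -> B2; the entry dispatching to the
// default successor is dropped, since the trailing jump emitted by
// do_SwitchRanges covers it anyway.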
2051 // we expect the keys to be sorted by increasing value
2052 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2053 SwitchRangeList* res = new SwitchRangeList();
2054 int len = x->length();
2055 if (len > 0) {
2056 BlockBegin* default_sux = x->default_sux();
2057 int key = x->key_at(0);
2058 BlockBegin* sux = x->sux_at(0);
2059 SwitchRange* range = new SwitchRange(key, sux);
2060 for (int i = 1; i < len; i++) {
2061 int new_key = x->key_at(i);
2062 BlockBegin* new_sux = x->sux_at(i);
2063 if (key+1 == new_key && sux == new_sux) {
2064 // still in same range
2065 range->set_high_key(new_key);
2066 } else {
2067 // skip tests which explicitly dispatch to the default
2068 if (range->sux() != default_sux) {
2069 res->append(range);
2070 }
2071 range = new SwitchRange(new_key, new_sux);
2072 }
2073 key = new_key;
2074 sux = new_sux;
2075 }
2076 if (res->length() == 0 || res->last() != range) res->append(range);
2077 }
2078 return res;
2079 }
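// (Example for create_lookup_ranges(LookupSwitch*) above.) Sorted keys
// {1 -> B1, 2 -> B1, 10 -> B2} merge into [1..2] -> B1 and [10..10] -> B2;
// only numerically adjacent keys with the same successor are coalesced.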
2082 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2083 LIRItem tag(x->tag(), this);
2084 tag.load_item();
2085 set_no_result(x);
2087 if (x->is_safepoint()) {
2088 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2089 }
2091 // move values into phi locations
2092 move_to_phi(x->state());
2094 int lo_key = x->lo_key();
2095 int hi_key = x->hi_key();
2096 int len = x->length();
2097 CodeEmitInfo* info = state_for(x, x->state());
2098 LIR_Opr value = tag.result();
2099 if (UseTableRanges) {
2100 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2101 } else {
2102 for (int i = 0; i < len; i++) {
2103 __ cmp(lir_cond_equal, value, i + lo_key);
2104 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2105 }
2106 __ jump(x->default_sux());
2107 }
2108 }
2111 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2112 LIRItem tag(x->tag(), this);
2113 tag.load_item();
2114 set_no_result(x);
2116 if (x->is_safepoint()) {
2117 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2118 }
2120 // move values into phi locations
2121 move_to_phi(x->state());
2123 LIR_Opr value = tag.result();
2124 if (UseTableRanges) {
2125 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2126 } else {
2127 int len = x->length();
2128 for (int i = 0; i < len; i++) {
2129 __ cmp(lir_cond_equal, value, x->key_at(i));
2130 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2131 }
2132 __ jump(x->default_sux());
2133 }
2134 }
2137 void LIRGenerator::do_Goto(Goto* x) {
2138 set_no_result(x);
2140 if (block()->next()->as_OsrEntry()) {
2141 // need to free up storage used for OSR entry point
2142 LIR_Opr osrBuffer = block()->next()->operand();
2143 BasicTypeList signature;
2144 signature.append(T_INT);
2145 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2146 __ move(osrBuffer, cc->args()->at(0));
2147 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2148 getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2149 }
2151 if (x->is_safepoint()) {
2152 ValueStack* state = x->state_before() ? x->state_before() : x->state();
2154 // increment backedge counter if needed
2155 increment_backedge_counter(state_for(x, state));
2157 CodeEmitInfo* safepoint_info = state_for(x, state);
2158 __ safepoint(safepoint_poll_register(), safepoint_info);
2159 }
2161 // emit phi-instruction move after safepoint since this simplifies
2162 // describing the state at the safepoint.
2163 move_to_phi(x->state());
2165 __ jump(x->default_sux());
2166 }
2169 void LIRGenerator::do_Base(Base* x) {
2170 __ std_entry(LIR_OprFact::illegalOpr);
2171 // Emit moves from physical registers / stack slots to virtual registers
2172 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2173 IRScope* irScope = compilation()->hir()->top_scope();
2174 int java_index = 0;
2175 for (int i = 0; i < args->length(); i++) {
2176 LIR_Opr src = args->at(i);
2177 assert(!src->is_illegal(), "check");
2178 BasicType t = src->type();
2180 // Types which are smaller than int are passed as int, so
2181 // correct the type of the value passed.
2182 switch (t) {
2183 case T_BYTE:
2184 case T_BOOLEAN:
2185 case T_SHORT:
2186 case T_CHAR:
2187 t = T_INT;
2188 break;
2189 }
2191 LIR_Opr dest = new_register(t);
2192 __ move(src, dest);
2194 // Assign new location to Local instruction for this local
2195 Local* local = x->state()->local_at(java_index)->as_Local();
2196 assert(local != NULL, "Locals for incoming arguments must have been created");
2197 assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2198 local->set_operand(dest);
2199 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
2200 java_index += type2size[t];
2201 }
2203 if (compilation()->env()->dtrace_method_probes()) {
2204 BasicTypeList signature;
2205 signature.append(T_INT); // thread
2206 signature.append(T_OBJECT); // methodOop
2207 LIR_OprList* args = new LIR_OprList();
2208 args->append(getThreadPointer());
2209 LIR_Opr meth = new_register(T_OBJECT);
2210 __ oop2reg(method()->constant_encoding(), meth);
2211 args->append(meth);
2212 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2213 }
2215 if (method()->is_synchronized()) {
2216 LIR_Opr obj;
2217 if (method()->is_static()) {
2218 obj = new_register(T_OBJECT);
2219 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2220 } else {
2221 Local* receiver = x->state()->local_at(0)->as_Local();
2222 assert(receiver != NULL, "must already exist");
2223 obj = receiver->operand();
2224 }
2225 assert(obj->is_valid(), "must be valid");
2227 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2228 LIR_Opr lock = new_register(T_INT);
2229 __ load_stack_address_monitor(0, lock);
2231 CodeEmitInfo* info = new CodeEmitInfo(SynchronizationEntryBCI, scope()->start()->state(), NULL);
2232 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2234 // receiver is guaranteed non-NULL so don't need CodeEmitInfo
2235 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2236 }
2237 }
2239 // increment invocation counters if needed
2240 increment_invocation_counter(new CodeEmitInfo(0, scope()->start()->state(), NULL));
2242 // all blocks with a successor must end with an unconditional jump
2243 // to the successor even if they are consecutive
2244 __ jump(x->default_sux());
2245 }
2248 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2249 // construct our frame and model the production of incoming pointer
2250 // to the OSR buffer.
2251 __ osr_entry(LIR_Assembler::osrBufferPointer());
2252 LIR_Opr result = rlock_result(x);
2253 __ move(LIR_Assembler::osrBufferPointer(), result);
2254 }
2257 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2258 int i = x->has_receiver() ? 1 : 0;
2259 for (; i < args->length(); i++) {
2260 LIRItem* param = args->at(i);
2261 LIR_Opr loc = arg_list->at(i);
2262 if (loc->is_register()) {
2263 param->load_item_force(loc);
2264 } else {
2265 LIR_Address* addr = loc->as_address_ptr();
2266 param->load_for_store(addr->type());
2267 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2268 __ unaligned_move(param->result(), addr);
2269 } else {
2270 __ move(param->result(), addr);
2271 }
2272 }
2273 }
2275 if (x->has_receiver()) {
2276 LIRItem* receiver = args->at(0);
2277 LIR_Opr loc = arg_list->at(0);
2278 if (loc->is_register()) {
2279 receiver->load_item_force(loc);
2280 } else {
2281 assert(loc->is_address(), "just checking");
2282 receiver->load_for_store(T_OBJECT);
2283 __ move(receiver->result(), loc);
2284 }
2285 }
2286 }
2289 // Visits all arguments, returns appropriate items without loading them
2290 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2291 LIRItemList* argument_items = new LIRItemList();
2292 if (x->has_receiver()) {
2293 LIRItem* receiver = new LIRItem(x->receiver(), this);
2294 argument_items->append(receiver);
2295 }
2296 int idx = x->has_receiver() ? 1 : 0;
2297 for (int i = 0; i < x->number_of_arguments(); i++) {
2298 LIRItem* param = new LIRItem(x->argument_at(i), this);
2299 argument_items->append(param);
2300 idx += (param->type()->is_double_word() ? 2 : 1);
2301 }
2302 return argument_items;
2303 }
2306 // The invoke with receiver has the following phases:
2307 // a) traverse and load/lock receiver;
2308 // b) traverse all arguments -> item-array (invoke_visit_argument)
2309 // c) push receiver on stack
2310 // d) load each of the items and push on stack
2311 // e) unlock receiver
2312 // f) move receiver into receiver-register %o0
2313 // g) lock result registers and emit call operation
2314 //
2315 // Before issuing a call, we must spill-save all values on stack
2316 // that are in caller-save registers. "spill-save" moves those registers
2317 // either into a free callee-save register or spills them if no free
2318 // callee-save register is available.
2319 //
2320 // The problem is where to invoke spill-save.
2321 // - if invoked between e) and f), "spill-save" may lock a callee-save
2322 // register and thereby destroy the receiver register
2323 // before f) is executed
2324 // - if we rearrange f) to be earlier, by loading %o0, it
2325 // may destroy a value on the stack that is currently in %o0
2326 // and is waiting to be spilled
2327 // - if we keep the receiver locked while doing spill-save,
2328 // we cannot spill it as it is spill-locked
2329 //
2330 void LIRGenerator::do_Invoke(Invoke* x) {
2331 CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2333 LIR_OprList* arg_list = cc->args();
2334 LIRItemList* args = invoke_visit_arguments(x);
2335 LIR_Opr receiver = LIR_OprFact::illegalOpr;
2337 // setup result register
2338 LIR_Opr result_register = LIR_OprFact::illegalOpr;
2339 if (x->type() != voidType) {
2340 result_register = result_register_for(x->type());
2341 }
2343 CodeEmitInfo* info = state_for(x, x->state());
2345 invoke_load_arguments(x, args, arg_list);
2347 if (x->has_receiver()) {
2348 args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2349 receiver = args->at(0)->result();
2350 }
2352 // emit invoke code
2353 bool optimized = x->target_is_loaded() && x->target_is_final();
2354 assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2356 switch (x->code()) {
2357 case Bytecodes::_invokestatic:
2358 __ call_static(x->target(), result_register,
2359 SharedRuntime::get_resolve_static_call_stub(),
2360 arg_list, info);
2361 break;
2362 case Bytecodes::_invokespecial:
2363 case Bytecodes::_invokevirtual:
2364 case Bytecodes::_invokeinterface:
2365 // for final target we still produce an inline cache, in order
2366 // to be able to call mixed mode
2367 if (x->code() == Bytecodes::_invokespecial || optimized) {
2368 __ call_opt_virtual(x->target(), receiver, result_register,
2369 SharedRuntime::get_resolve_opt_virtual_call_stub(),
2370 arg_list, info);
2371 } else if (x->vtable_index() < 0) {
2372 __ call_icvirtual(x->target(), receiver, result_register,
2373 SharedRuntime::get_resolve_virtual_call_stub(),
2374 arg_list, info);
2375 } else {
2376 int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2377 int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2378 __ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
2379 }
2380 break;
2381 default:
2382 ShouldNotReachHere();
2383 break;
2384 }
2386 if (x->type()->is_float() || x->type()->is_double()) {
2387 // Force rounding of results from non-strictfp when in strictfp
2388 // scope (or when we don't know the strictness of the callee, to
2389 // be safe.)
2390 if (method()->is_strict()) {
2391 if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2392 result_register = round_item(result_register);
2393 }
2394 }
2395 }
2397 if (result_register->is_valid()) {
2398 LIR_Opr result = rlock_result(x);
2399 __ move(result_register, result);
2400 }
2401 }
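// (Worked example for the vtable dispatch above; the numbers are assumed,
// not real offsets.) With vtable_index == 2, vtableEntry::size() == 1 word
// and vtable_start_offset() == 42 words on a 64-bit VM (wordSize == 8):
//   entry_offset  = 42 + 2 * 1 = 44 (in words)
//   vtable_offset = 44 * 8 + method_offset_in_bytes()
// giving the byte offset of the target methodOop inside the receiver's
// klass, which call_virtual uses to load and invoke the method.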
2404 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
2405 assert(x->number_of_arguments() == 1, "wrong type");
2406 LIRItem value (x->argument_at(0), this);
2407 LIR_Opr reg = rlock_result(x);
2408 value.load_item();
2409 LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
2410 __ move(tmp, reg);
2411 }
2415 // Code for : x->x() {x->cond()} x->y() ? x->tval() : x->fval()
2416 void LIRGenerator::do_IfOp(IfOp* x) {
2417 #ifdef ASSERT
2418 {
2419 ValueTag xtag = x->x()->type()->tag();
2420 ValueTag ttag = x->tval()->type()->tag();
2421 assert(xtag == intTag || xtag == objectTag, "cannot handle others");
2422 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
2423 assert(ttag == x->fval()->type()->tag(), "cannot handle others");
2424 }
2425 #endif
2427 LIRItem left(x->x(), this);
2428 LIRItem right(x->y(), this);
2429 left.load_item();
2430 if (can_inline_as_constant(right.value())) {
2431 right.dont_load_item();
2432 } else {
2433 right.load_item();
2434 }
2436 LIRItem t_val(x->tval(), this);
2437 LIRItem f_val(x->fval(), this);
2438 t_val.dont_load_item();
2439 f_val.dont_load_item();
2440 LIR_Opr reg = rlock_result(x);
2442 __ cmp(lir_cond(x->cond()), left.result(), right.result());
2443 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg);
2444 }
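// (Example for do_IfOp above.) The expression  a < b ? 1 : 0  lowers to a
// branchless compare-and-move, roughly:
//   cmp   less, a, b
//   cmove less, 1, 0, reg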
2447 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
2448 switch (x->id()) {
2449 case vmIntrinsics::_intBitsToFloat :
2450 case vmIntrinsics::_doubleToRawLongBits :
2451 case vmIntrinsics::_longBitsToDouble :
2452 case vmIntrinsics::_floatToRawIntBits : {
2453 do_FPIntrinsics(x);
2454 break;
2455 }
2457 case vmIntrinsics::_currentTimeMillis: {
2458 assert(x->number_of_arguments() == 0, "wrong type");
2459 LIR_Opr reg = result_register_for(x->type());
2460 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeMillis), getThreadTemp(),
2461 reg, new LIR_OprList());
2462 LIR_Opr result = rlock_result(x);
2463 __ move(reg, result);
2464 break;
2465 }
2467 case vmIntrinsics::_nanoTime: {
2468 assert(x->number_of_arguments() == 0, "wrong type");
2469 LIR_Opr reg = result_register_for(x->type());
2470 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeNanos), getThreadTemp(),
2471 reg, new LIR_OprList());
2472 LIR_Opr result = rlock_result(x);
2473 __ move(reg, result);
2474 break;
2475 }
2477 case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break;
2478 case vmIntrinsics::_getClass: do_getClass(x); break;
2479 case vmIntrinsics::_currentThread: do_currentThread(x); break;
2481 case vmIntrinsics::_dlog: // fall through
2482 case vmIntrinsics::_dlog10: // fall through
2483 case vmIntrinsics::_dabs: // fall through
2484 case vmIntrinsics::_dsqrt: // fall through
2485 case vmIntrinsics::_dtan: // fall through
2486 case vmIntrinsics::_dsin : // fall through
2487 case vmIntrinsics::_dcos : do_MathIntrinsic(x); break;
2488 case vmIntrinsics::_arraycopy: do_ArrayCopy(x); break;
2490 // java.nio.Buffer.checkIndex
2491 case vmIntrinsics::_checkIndex: do_NIOCheckIndex(x); break;
2493 case vmIntrinsics::_compareAndSwapObject:
2494 do_CompareAndSwap(x, objectType);
2495 break;
2496 case vmIntrinsics::_compareAndSwapInt:
2497 do_CompareAndSwap(x, intType);
2498 break;
2499 case vmIntrinsics::_compareAndSwapLong:
2500 do_CompareAndSwap(x, longType);
2501 break;
2503 // sun.misc.AtomicLongCSImpl.attemptUpdate
2504 case vmIntrinsics::_attemptUpdate:
2505 do_AttemptUpdate(x);
2506 break;
2508 default: ShouldNotReachHere(); break;
2509 }
2510 }
2513 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
2514 // Need recv in a temporary register so it interferes with the other temporaries
2515 LIR_Opr recv = LIR_OprFact::illegalOpr;
2516 LIR_Opr mdo = new_register(T_OBJECT);
2517 LIR_Opr tmp = new_register(T_INT);
2518 if (x->recv() != NULL) {
2519 LIRItem value(x->recv(), this);
2520 value.load_item();
2521 recv = new_register(T_OBJECT);
2522 __ move(value.result(), recv);
2523 }
2524 __ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder());
2525 }
2528 void LIRGenerator::do_ProfileCounter(ProfileCounter* x) {
2529 LIRItem mdo(x->mdo(), this);
2530 mdo.load_item();
2532 increment_counter(new LIR_Address(mdo.result(), x->offset(), T_INT), x->increment());
2533 }
2536 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
2537 LIRItemList args(1);
2538 LIRItem value(arg1, this);
2539 args.append(&value);
2540 BasicTypeList signature;
2541 signature.append(as_BasicType(arg1->type()));
2543 return call_runtime(&signature, &args, entry, result_type, info);
2544 }
2547 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
2548 LIRItemList args(2);
2549 LIRItem value1(arg1, this);
2550 LIRItem value2(arg2, this);
2551 args.append(&value1);
2552 args.append(&value2);
2553 BasicTypeList signature;
2554 signature.append(as_BasicType(arg1->type()));
2555 signature.append(as_BasicType(arg2->type()));
2557 return call_runtime(&signature, &args, entry, result_type, info);
2558 }
2561 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
2562 address entry, ValueType* result_type, CodeEmitInfo* info) {
2563 // get a result register
2564 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
2565 LIR_Opr result = LIR_OprFact::illegalOpr;
2566 if (result_type->tag() != voidTag) {
2567 result = new_register(result_type);
2568 phys_reg = result_register_for(result_type);
2569 }
2571 // move the arguments into the correct location
2572 CallingConvention* cc = frame_map()->c_calling_convention(signature);
2573 assert(cc->length() == args->length(), "argument mismatch");
2574 for (int i = 0; i < args->length(); i++) {
2575 LIR_Opr arg = args->at(i);
2576 LIR_Opr loc = cc->at(i);
2577 if (loc->is_register()) {
2578 __ move(arg, loc);
2579 } else {
2580 LIR_Address* addr = loc->as_address_ptr();
2581 // if (!can_store_as_constant(arg)) {
2582 // LIR_Opr tmp = new_register(arg->type());
2583 // __ move(arg, tmp);
2584 // arg = tmp;
2585 // }
2586 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2587 __ unaligned_move(arg, addr);
2588 } else {
2589 __ move(arg, addr);
2590 }
2591 }
2592 }
2594 if (info) {
2595 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
2596 } else {
2597 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
2598 }
2599 if (result->is_valid()) {
2600 __ move(phys_reg, result);
2601 }
2602 return result;
2603 }
2606 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
2607 address entry, ValueType* result_type, CodeEmitInfo* info) {
2608 // get a result register
2609 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
2610 LIR_Opr result = LIR_OprFact::illegalOpr;
2611 if (result_type->tag() != voidTag) {
2612 result = new_register(result_type);
2613 phys_reg = result_register_for(result_type);
2614 }
2616 // move the arguments into the correct location
2617 CallingConvention* cc = frame_map()->c_calling_convention(signature);
2619 assert(cc->length() == args->length(), "argument mismatch");
2620 for (int i = 0; i < args->length(); i++) {
2621 LIRItem* arg = args->at(i);
2622 LIR_Opr loc = cc->at(i);
2623 if (loc->is_register()) {
2624 arg->load_item_force(loc);
2625 } else {
2626 LIR_Address* addr = loc->as_address_ptr();
2627 arg->load_for_store(addr->type());
2628 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2629 __ unaligned_move(arg->result(), addr);
2630 } else {
2631 __ move(arg->result(), addr);
2632 }
2633 }
2634 }
2636 if (info) {
2637 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
2638 } else {
2639 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
2640 }
2641 if (result->is_valid()) {
2642 __ move(phys_reg, result);
2643 }
2644 return result;
2645 }
2649 void LIRGenerator::increment_invocation_counter(CodeEmitInfo* info, bool backedge) {
2650 #ifdef TIERED
2651 if (_compilation->env()->comp_level() == CompLevel_fast_compile &&
2652 (method()->code_size() >= Tier1BytecodeLimit || backedge)) {
2653 int limit = InvocationCounter::Tier1InvocationLimit;
2654 int offset = in_bytes(methodOopDesc::invocation_counter_offset() +
2655 InvocationCounter::counter_offset());
2656 if (backedge) {
2657 limit = InvocationCounter::Tier1BackEdgeLimit;
2658 offset = in_bytes(methodOopDesc::backedge_counter_offset() +
2659 InvocationCounter::counter_offset());
2660 }
2662 LIR_Opr meth = new_register(T_OBJECT);
2663 __ oop2reg(method()->constant_encoding(), meth);
2664 LIR_Opr result = increment_and_return_counter(meth, offset, InvocationCounter::count_increment);
2665 __ cmp(lir_cond_aboveEqual, result, LIR_OprFact::intConst(limit));
2666 CodeStub* overflow = new CounterOverflowStub(info, info->bci());
2667 __ branch(lir_cond_aboveEqual, T_INT, overflow);
2668 __ branch_destination(overflow->continuation());
2669 }
2670 #endif
2671 }
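// (Sketch of the counter check emitted above under TIERED, not verbatim.)
//   meth  = methodOop constant
//   count = increment_and_return_counter(meth, offset, count_increment)
//   cmp    aboveEqual, count, limit
//   branch aboveEqual -> CounterOverflowStub   // hand off to the runtime
//   <continuation label of the stub>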