Sat, 01 Sep 2012 13:25:18 -0400
6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
/*
 * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/bitMap.inline.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/heapRegion.hpp"
#endif

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
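
// With the shorthand above, the LIR emission throughout this file reads like
// assembly, e.g.
//   __ move(src, dest);   // expands to gen()->lir()->move(src, dest)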

// TODO: ARM - Use some recognizable constant which still fits architectural constraints
#ifdef ARM
#define PATCHED_ADDR  (204)
#else
#define PATCHED_ADDR  (max_jint)
#endif

void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}


//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3
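//
// A rough usage sketch (mirroring move_to_phi() further below): a resolver is
// created per block boundary, fed one move(src, dest) edge per phi operand,
// and its destructor walks the resulting graph and emits the reordered moves:
//
//   PhiResolver resolver(this, _virtual_register_number + max_phis * 2);
//   resolver.move(operand_for_instruction(cur_val), operand_for_instruction(phi));
//   // moves are emitted when the resolver goes out of scope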

PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}


// Traverse the assignment graph in depth-first order and generate moves in post order
// i.e., for two assignments  b := c, a := b  start with node c:
//   Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
//   Generates moves in this order: move b to a and move c to b
// i.e., for the cycle  a := b, b := a  start with node a:
//   Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
//   Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}


PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}


ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}


//--------------------------------------------------------------


void LIRGenerator::init() {
  _bs = Universe::heap()->barrier_set();
}


void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}


void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}


void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}


//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be a root
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}


// This is called for each node in the tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when we encounter a root
  if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != NULL, "state must be defined");

  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers());
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}


void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info) {
  if (!obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ klass2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ oop2reg(obj->constant_encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
}


void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
  __ move(index, result);
}


void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          int c = right->as_jint();
          if (is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div(left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        if (is_strictfp) {
          __ div_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
    case Bytecodes::_ishl:
    case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
    case Bytecodes::_ishr:
    case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
    case Bytecodes::_iushr:
    case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
    default: ShouldNotReachHere();
  }
}


void LIRGenerator::logic_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:  __ logical_or(left_op, right_op, result_op); break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}


void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  klass2reg_with_patching(klass_reg, klass, info);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

static Value maxvalue(IfOp* ifop) {
  switch (ifop->cond()) {
    case If::eql: return NULL;
    case If::neq: return NULL;
    case If::lss: // x <  y ? x : y
    case If::leq: // x <= y ? x : y
      if (ifop->x() == ifop->tval() &&
          ifop->y() == ifop->fval()) return ifop->y();
      return NULL;

    case If::gtr: // x >  y ? y : x
    case If::geq: // x >= y ? y : x
      if (ifop->x() == ifop->tval() &&
          ifop->y() == ifop->fval()) return ifop->y();
      return NULL;

  }
}

static ciType* phi_declared_type(Phi* phi) {
  ciType* t = phi->operand_at(0)->declared_type();
  if (t == NULL) {
    return NULL;
  }
  for(int i = 1; i < phi->operand_count(); i++) {
    if (t != phi->operand_at(i)->declared_type()) {
      return NULL;
    }
  }
  return t;
}

void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }

    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
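  //
  // A hedged example (not in the original comments): for a call like
  //   System.arraycopy(a, 0, b, 0, a.length);
  // with a probable array type identified, the constant positions drop the
  // position checks, the a.length argument drops the source null and length
  // checks, equal offsets drop the overlap handling, and the type check
  // disappears only when the exact array types match (or src == dst).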
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != NULL) {
    Value length_limit = NULL;

    IfOp* ifop = length->as_IfOp();
    if (ifop != NULL) {
      // look for expressions like min(v, a.length) which ends up as
      //   x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }

    // try to skip null checks and range checks
    NewArray* src_array = src->as_NewArray();
    if (src_array != NULL) {
      flags &= ~LIR_OpArrayCopy::src_null_check;
      if (length_limit != NULL &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }

    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != NULL) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != NULL &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    if (src_int->value() >= dst_int->value()) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != NULL) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    // src and dest positions are the same, or dst is zero so assume
    // nonoverlapping copy.
    flags &= ~LIR_OpArrayCopy::overlapping;
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}


LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()],
         err_msg_res("size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type())));
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}

void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_OBJECT);
    __ oop2reg(md->constant_encoding(), md_reg);

    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}

// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated with Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double, ...) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At exit of a basic block, we fill the register(s) and spill slots.
// At entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.
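//
// A hedged illustration (not part of the original comment): in Java source
// like
//   int v = p ? a : b;   // each predecessor block ends with its value live
//   use(v);              // the join block starts with a Phi for v
// both predecessors leave the value on TOS, so it is passed in a register;
// any additional live values would travel through the spill area as described.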


// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    LIR_Opr operand = cur_val->operand();
    if (cur_val->operand()->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}


LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically.  This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  return LIR_OprFact::virtual_register(vreg, type);
}


// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }

  set_result(x, reg);
  return reg;
}


//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}


//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block.  After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}


void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    signature.append(T_OBJECT); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}

// Example: ref.get()
// Combination of LoadField and G1 pre-write barrier
void LIRGenerator::do_Reference_get(Intrinsic* x) {

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Address* referent_field_adr =
    new LIR_Address(reference.result(), referent_offset, T_OBJECT);

  LIR_Opr result = rlock_result(x);

  __ load(referent_field_adr, result, info);

  // Register the value in the referent field with the pre-barrier
  pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
              result /* pre_val */,
              false  /* do_load */,
              false  /* patch */,
              NULL   /* info */);
}

// Example: clazz.isInstance(object)
void LIRGenerator::do_isInstance(Intrinsic* x) {
  assert(x->number_of_arguments() == 2, "wrong type");

  // TODO could try to substitute this node with an equivalent InstanceOf
  // if clazz is known to be a constant Class. This will pick up newly found
  // constants after HIR construction. I'll leave this to a future change.

  // as a first cut, make a simple leaf call to runtime to stay platform independent.
  // could follow the aastore example in a future change.

  LIRItem clazz(x->argument_at(0), this);
  LIRItem object(x->argument_at(1), this);
  clazz.load_item();
  object.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform null check on clazz
  if (x->needs_null_check()) {
    CodeEmitInfo* info = state_for(x);
    __ null_check(clazz.result(), info);
  }

  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
                                     x->type(),
                                     NULL); // NULL CodeEmitInfo results in a leaf call
  __ move(call_result, result);
}

// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), UseCompressedKlassPointers ? T_OBJECT : T_ADDRESS), result, info);
  __ move_wide(new LIR_Address(result, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
}


// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}


void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}


//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}


Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}


void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}


// Block local constant handling.  This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers.  Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.
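//
// A hedged sketch of the effect (within LIRGenerator, names as below): two
// requests for the same constant bits inside one block share a register,
//
//   LIR_Opr r1 = load_constant(c);   // moves c into a fresh virtual register
//   LIR_Opr r2 = load_constant(c);   // same type and bits -> returns r1
//
// and block_do_epilog() truncates _constants / _reg_for_constants so the
// next block starts with an empty cache.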

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}

// Various barriers

void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                               bool do_load, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
  switch (_bs->kind()) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      // No pre barriers
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No pre barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_post_barrier(addr, new_val);
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr, new_val);
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No post barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC

void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                                                     bool do_load, bool patch, CodeEmitInfo* info) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    flag_type = T_BYTE;
  }
  LIR_Opr thrd = getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(JavaThread::satb_mark_queue_offset() +
                             PtrQueue::byte_offset_of_active()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == NULL, "sanity");

    slow = new G1PreBarrierStub(pre_val);
  }

  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}

void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  // If the "new_val" is a constant NULL, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == NULL) return;

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr xor_res = new_pointer_register();
  LIR_Opr xor_shift_res = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, xor_res);
    __ logical_xor(xor_res, new_val, xor_res);
    __ move(xor_res, xor_shift_res);
    __ unsigned_shift_right(xor_shift_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  } else {
    __ logical_xor(addr, new_val, xor_res);
    __ unsigned_shift_right(xor_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  }

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    __ leal(new_val, new_val_reg);
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));

  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
  __ branch_destination(slow->continuation());
}

#endif // SERIALGC
////////////////////////////////////////////////////////////////////////

void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {

  assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    // ptr cannot be an object because we use this barrier for array card marks
    // and addr can point in the middle of an array.
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

#ifdef ARM
  // TODO: ARM - move to platform-dependent code
  LIR_Opr tmp = FrameMap::R14_opr;
  if (VM_Version::supports_movw()) {
    __ move((LIR_Opr)card_table_base, tmp);
  } else {
    __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
  }

  CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
  LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
  if (((int)ct->byte_map_base & 0xff) == 0) {
    __ move(tmp, card_addr);
  } else {
    LIR_Opr tmp_zero = new_register(T_INT);
    __ move(LIR_OprFact::intConst(0), tmp_zero);
    __ move(tmp_zero, card_addr);
  }
#else // ARM
  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  }
  if (can_inline_as_constant(card_table_base)) {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
  } else {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, load_constant(card_table_base),
                            T_BYTE));
  }
#endif // ARM
}
1638 //------------------------field access--------------------------------------
1640 // Comment copied form templateTable_i486.cpp
1641 // ----------------------------------------------------------------------------
1642 // Volatile variables demand their effects be made known to all CPU's in
1643 // order. Store buffers on most chips allow reads & writes to reorder; the
1644 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1645 // memory barrier (i.e., it's not sufficient that the interpreter does not
1646 // reorder volatile references, the hardware also must not reorder them).
1647 //
1648 // According to the new Java Memory Model (JMM):
1649 // (1) All volatiles are serialized wrt to each other.
1650 // ALSO reads & writes act as aquire & release, so:
1651 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1652 // the read float up to before the read. It's OK for non-volatile memory refs
1653 // that happen before the volatile read to float down below it.
1654 // (3) Similar a volatile write cannot let unrelated NON-volatile memory refs
1655 // that happen BEFORE the write float down to after the write. It's OK for
1656 // non-volatile memory refs that happen after the volatile write to float up
1657 // before it.
1658 //
1659 // We only put in barriers around volatile refs (they are expensive), not
1660 // _between_ memory refs (that would require us to track the flavor of the
1661 // previous memory refs). Requirements (2) and (3) require some barriers
1662 // before volatile stores and after volatile loads. These nearly cover
1663 // requirement (1) but miss the volatile-store-volatile-load case. This final
1664 // case is placed after volatile-stores although it could just as well go
1665 // before volatile-loads.
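// Concretely, for a volatile access on an MP system the code below emits:
//
//   volatile store:  membar_release(); store; membar();  // trailing full
//                                                        // fence covers the
//                                                        // store-load case
//   volatile load:   load; membar_acquire();
//
// (see the os::is_MP() guarded barriers in do_StoreField and do_LoadField).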
1668 void LIRGenerator::do_StoreField(StoreField* x) {
1669 bool needs_patching = x->needs_patching();
1670 bool is_volatile = x->field()->is_volatile();
1671 BasicType field_type = x->field_type();
1672 bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
1674 CodeEmitInfo* info = NULL;
1675 if (needs_patching) {
1676 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1677 info = state_for(x, x->state_before());
1678 } else if (x->needs_null_check()) {
1679 NullCheck* nc = x->explicit_null_check();
1680 if (nc == NULL) {
1681 info = state_for(x);
1682 } else {
1683 info = state_for(nc);
1684 }
1685 }
1688 LIRItem object(x->obj(), this);
1689 LIRItem value(x->value(), this);
1691 object.load_item();
1693 if (is_volatile || needs_patching) {
1694 // load item if field is volatile (fewer special cases for volatiles)
1695 // load item if field not initialized
1696 // load item if field not constant
1697 // because of code patching we cannot inline constants
1698 if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1699 value.load_byte_item();
1700 } else {
1701 value.load_item();
1702 }
1703 } else {
1704 value.load_for_store(field_type);
1705 }
1707 set_no_result(x);
1709 #ifndef PRODUCT
1710 if (PrintNotLoaded && needs_patching) {
1711 tty->print_cr(" ###class not loaded at store_%s bci %d",
1712 x->is_static() ? "static" : "field", x->printable_bci());
1713 }
1714 #endif
1716 if (x->needs_null_check() &&
1717 (needs_patching ||
1718 MacroAssembler::needs_explicit_null_check(x->offset()))) {
1719 // emit an explicit null check because the offset is too large
1720 __ null_check(object.result(), new CodeEmitInfo(info));
1721 }
1723 LIR_Address* address;
1724 if (needs_patching) {
1725 // we need to patch the offset in the instruction so don't allow
1726 // generate_address to try to be smart about emitting the -1.
1727 // Otherwise the patching code won't know how to find the
1728 // instruction to patch.
1729 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1730 } else {
1731 address = generate_address(object.result(), x->offset(), field_type);
1732 }
1734 if (is_volatile && os::is_MP()) {
1735 __ membar_release();
1736 }
1738 if (is_oop) {
1739 // Do the pre-write barrier, if any.
1740 pre_barrier(LIR_OprFact::address(address),
1741 LIR_OprFact::illegalOpr /* pre_val */,
1742 true /* do_load*/,
1743 needs_patching,
1744 (info ? new CodeEmitInfo(info) : NULL));
1745 }
1747 if (is_volatile && !needs_patching) {
1748 volatile_field_store(value.result(), address, info);
1749 } else {
1750 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1751 __ store(value.result(), address, info, patch_code);
1752 }
1754 if (is_oop) {
1755 // Store to an object, so mark the card of the object header
1756 post_barrier(object.result(), value.result());
1757 }
1759 if (is_volatile && os::is_MP()) {
1760 __ membar();
1761 }
1762 }
1765 void LIRGenerator::do_LoadField(LoadField* x) {
1766 bool needs_patching = x->needs_patching();
1767 bool is_volatile = x->field()->is_volatile();
1768 BasicType field_type = x->field_type();
1770 CodeEmitInfo* info = NULL;
1771 if (needs_patching) {
1772 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1773 info = state_for(x, x->state_before());
1774 } else if (x->needs_null_check()) {
1775 NullCheck* nc = x->explicit_null_check();
1776 if (nc == NULL) {
1777 info = state_for(x);
1778 } else {
1779 info = state_for(nc);
1780 }
1781 }
1783 LIRItem object(x->obj(), this);
1785 object.load_item();
1787 #ifndef PRODUCT
1788 if (PrintNotLoaded && needs_patching) {
1789 tty->print_cr(" ###class not loaded at load_%s bci %d",
1790 x->is_static() ? "static" : "field", x->printable_bci());
1791 }
1792 #endif
1794 if (x->needs_null_check() &&
1795 (needs_patching ||
1796 MacroAssembler::needs_explicit_null_check(x->offset()))) {
1797 // emit an explicit null check because the offset is too large
1798 __ null_check(object.result(), new CodeEmitInfo(info));
1799 }
1801 LIR_Opr reg = rlock_result(x, field_type);
1802 LIR_Address* address;
1803 if (needs_patching) {
1804 // we need to patch the offset in the instruction so don't allow
1805 // generate_address to try to be smart about emitting the -1.
1806 // Otherwise the patching code won't know how to find the
1807 // instruction to patch.
1808 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1809 } else {
1810 address = generate_address(object.result(), x->offset(), field_type);
1811 }
1813 if (is_volatile && !needs_patching) {
1814 volatile_field_load(address, reg, info);
1815 } else {
1816 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1817 __ load(address, reg, info, patch_code);
1818 }
1820 if (is_volatile && os::is_MP()) {
1821 __ membar_acquire();
1822 }
1823 }
1826 //------------------------java.nio.Buffer.checkIndex------------------------
1828 // int java.nio.Buffer.checkIndex(int)
1829 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1830 // NOTE: by the time we are in checkIndex() we are guaranteed that
1831 // the buffer is non-null (because checkIndex is package-private and
1832 // only called from within other methods in the buffer).
1833 assert(x->number_of_arguments() == 2, "wrong type");
1834 LIRItem buf (x->argument_at(0), this);
1835 LIRItem index(x->argument_at(1), this);
1836 buf.load_item();
1837 index.load_item();
1839 LIR_Opr result = rlock_result(x);
1840 if (GenerateRangeChecks) {
1841 CodeEmitInfo* info = state_for(x);
1842 CodeStub* stub = new RangeCheckStub(info, index.result(), true);
1843 if (index.result()->is_constant()) {
1844 cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1845 __ branch(lir_cond_belowEqual, T_INT, stub);
1846 } else {
1847 cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
1848 java_nio_Buffer::limit_offset(), T_INT, info);
1849 __ branch(lir_cond_aboveEqual, T_INT, stub);
1850 }
1851 __ move(index.result(), result);
1852 } else {
1853 // Just load the index into the result register
1854 __ move(index.result(), result);
1855 }
1856 }
1859 //------------------------array access--------------------------------------
1862 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
1863 LIRItem array(x->array(), this);
1864 array.load_item();
1865 LIR_Opr reg = rlock_result(x);
1867 CodeEmitInfo* info = NULL;
1868 if (x->needs_null_check()) {
1869 NullCheck* nc = x->explicit_null_check();
1870 if (nc == NULL) {
1871 info = state_for(x);
1872 } else {
1873 info = state_for(nc);
1874 }
1875 }
1876 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
1877 }
1880 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
1881 bool use_length = x->length() != NULL;
1882 LIRItem array(x->array(), this);
1883 LIRItem index(x->index(), this);
1884 LIRItem length(this);
1885 bool needs_range_check = true;
1887 if (use_length) {
1888 needs_range_check = x->compute_needs_range_check();
1889 if (needs_range_check) {
1890 length.set_instruction(x->length());
1891 length.load_item();
1892 }
1893 }
1895 array.load_item();
1896 if (index.is_constant() && can_inline_as_constant(x->index())) {
1897 // let it be a constant
1898 index.dont_load_item();
1899 } else {
1900 index.load_item();
1901 }
1903 CodeEmitInfo* range_check_info = state_for(x);
1904 CodeEmitInfo* null_check_info = NULL;
1905 if (x->needs_null_check()) {
1906 NullCheck* nc = x->explicit_null_check();
1907 if (nc != NULL) {
1908 null_check_info = state_for(nc);
1909 } else {
1910 null_check_info = range_check_info;
1911 }
1912 }
1914 // emit array address setup early so it schedules better
1915 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
1917 if (GenerateRangeChecks && needs_range_check) {
1918 if (use_length) {
1919 // TODO: use a (modified) version of array_range_check that does not require a
1920 // constant length to be loaded to a register
1921 __ cmp(lir_cond_belowEqual, length.result(), index.result());
1922 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1923 } else {
1924 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1925 // The range check performs the null check, so clear it out for the load
1926 null_check_info = NULL;
1927 }
1928 }
1930 __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
1931 }
1934 void LIRGenerator::do_NullCheck(NullCheck* x) {
1935 if (x->can_trap()) {
1936 LIRItem value(x->obj(), this);
1937 value.load_item();
1938 CodeEmitInfo* info = state_for(x);
1939 __ null_check(value.result(), info);
1940 }
1941 }
1944 void LIRGenerator::do_TypeCast(TypeCast* x) {
1945 LIRItem value(x->obj(), this);
1946 value.load_item();
1947 // the result is the same as from the node we are casting
1948 set_result(x, value.result());
1949 }
1952 void LIRGenerator::do_Throw(Throw* x) {
1953 LIRItem exception(x->exception(), this);
1954 exception.load_item();
1955 set_no_result(x);
1956 LIR_Opr exception_opr = exception.result();
1957 CodeEmitInfo* info = state_for(x, x->state());
1959 #ifndef PRODUCT
1960 if (PrintC1Statistics) {
1961 increment_counter(Runtime1::throw_count_address(), T_INT);
1962 }
1963 #endif
1965 // check if the instruction has an xhandler in any of the nested scopes
1966 bool unwind = false;
1967 if (info->exception_handlers()->length() == 0) {
1968 // this throw is not inside an xhandler
1969 unwind = true;
1970 } else {
1971 // get some idea of the throw type
1972 bool type_is_exact = true;
1973 ciType* throw_type = x->exception()->exact_type();
1974 if (throw_type == NULL) {
1975 type_is_exact = false;
1976 throw_type = x->exception()->declared_type();
1977 }
1978 if (throw_type != NULL && throw_type->is_instance_klass()) {
1979 ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
1980 unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
1981 }
1982 }
1984 // do null check before moving exception oop into fixed register
1985 // to avoid a fixed interval with an oop during the null check.
1986 // Use a copy of the CodeEmitInfo because debug information is
1987 // different for null_check and throw.
1988 if (GenerateCompilerNullChecks &&
1989 (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
1990 // if the exception object wasn't created using new then it might be null.
1991 __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
1992 }
1994 if (compilation()->env()->jvmti_can_post_on_exceptions()) {
1995 // we need to go through the exception lookup path to get JVMTI
1996 // notification done
1997 unwind = false;
1998 }
2000 // move exception oop into fixed register
2001 __ move(exception_opr, exceptionOopOpr());
2003 if (unwind) {
2004 __ unwind_exception(exceptionOopOpr());
2005 } else {
2006 __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
2007 }
2008 }
2011 void LIRGenerator::do_RoundFP(RoundFP* x) {
2012 LIRItem input(x->input(), this);
2013 input.load_item();
2014 LIR_Opr input_opr = input.result();
2015 assert(input_opr->is_register(), "why round if value is not in a register?");
2016 assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
2017 if (input_opr->is_single_fpu()) {
2018 set_result(x, round_item(input_opr)); // This code path not currently taken
2019 } else {
2020 LIR_Opr result = new_register(T_DOUBLE);
2021 set_vreg_flag(result, must_start_in_memory);
2022 __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
2023 set_result(x, result);
2024 }
2025 }
2027 void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
2028 LIRItem base(x->base(), this);
2029 LIRItem idx(this);
2031 base.load_item();
2032 if (x->has_index()) {
2033 idx.set_instruction(x->index());
2034 idx.load_nonconstant();
2035 }
2037 LIR_Opr reg = rlock_result(x, x->basic_type());
2039 int log2_scale = 0;
2040 if (x->has_index()) {
2041 assert(x->index()->type()->tag() == intTag, "should not find non-int index");
2042 log2_scale = x->log2_scale();
2043 }
2045 assert(!x->has_index() || idx.value() == x->index(), "should match");
2047 LIR_Opr base_op = base.result();
2048 #ifndef _LP64
2049 if (x->base()->type()->tag() == longTag) {
2050 base_op = new_register(T_INT);
2051 __ convert(Bytecodes::_l2i, base.result(), base_op);
2052 } else {
2053 assert(x->base()->type()->tag() == intTag, "must be");
2054 }
2055 #endif
2057 BasicType dst_type = x->basic_type();
2058 LIR_Opr index_op = idx.result();
2060 LIR_Address* addr;
2061 if (index_op->is_constant()) {
2062 assert(log2_scale == 0, "must not have a scale");
2063 addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2064 } else {
2065 #ifdef X86
2066 #ifdef _LP64
2067 if (!index_op->is_illegal() && index_op->type() == T_INT) {
2068 LIR_Opr tmp = new_pointer_register();
2069 __ convert(Bytecodes::_i2l, index_op, tmp);
2070 index_op = tmp;
2071 }
2072 #endif
2073 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2074 #elif defined(ARM)
2075 addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2076 #else
2077 if (index_op->is_illegal() || log2_scale == 0) {
2078 #ifdef _LP64
2079 if (!index_op->is_illegal() && index_op->type() == T_INT) {
2080 LIR_Opr tmp = new_pointer_register();
2081 __ convert(Bytecodes::_i2l, index_op, tmp);
2082 index_op = tmp;
2083 }
2084 #endif
2085 addr = new LIR_Address(base_op, index_op, dst_type);
2086 } else {
2087 LIR_Opr tmp = new_pointer_register();
2088 __ shift_left(index_op, log2_scale, tmp);
2089 addr = new LIR_Address(base_op, tmp, dst_type);
2090 }
2091 #endif
2092 }
2094 if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2095 __ unaligned_move(addr, reg);
2096 } else {
2097 if (dst_type == T_OBJECT && x->is_wide()) {
2098 __ move_wide(addr, reg);
2099 } else {
2100 __ move(addr, reg);
2101 }
2102 }
2103 }
2106 void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
2107 int log2_scale = 0;
2108 BasicType type = x->basic_type();
2110 if (x->has_index()) {
2111 assert(x->index()->type()->tag() == intTag, "should not find non-int index");
2112 log2_scale = x->log2_scale();
2113 }
2115 LIRItem base(x->base(), this);
2116 LIRItem value(x->value(), this);
2117 LIRItem idx(this);
2119 base.load_item();
2120 if (x->has_index()) {
2121 idx.set_instruction(x->index());
2122 idx.load_item();
2123 }
2125 if (type == T_BYTE || type == T_BOOLEAN) {
2126 value.load_byte_item();
2127 } else {
2128 value.load_item();
2129 }
2131 set_no_result(x);
2133 LIR_Opr base_op = base.result();
2134 #ifndef _LP64
2135 if (x->base()->type()->tag() == longTag) {
2136 base_op = new_register(T_INT);
2137 __ convert(Bytecodes::_l2i, base.result(), base_op);
2138 } else {
2139 assert(x->base()->type()->tag() == intTag, "must be");
2140 }
2141 #endif
2143 LIR_Opr index_op = idx.result();
2144 if (log2_scale != 0) {
2145 // temporary fix (platform dependent code without shift on Intel would be better)
2146 index_op = new_pointer_register();
2147 #ifdef _LP64
2148 if (idx.result()->type() == T_INT) {
2149 __ convert(Bytecodes::_i2l, idx.result(), index_op);
2150 } else {
2151 #endif
2152 // TODO: ARM also allows embedded shift in the address
2153 __ move(idx.result(), index_op);
2154 #ifdef _LP64
2155 }
2156 #endif
2157 __ shift_left(index_op, log2_scale, index_op);
2158 }
2159 #ifdef _LP64
2160 else if (!index_op->is_illegal() && index_op->type() == T_INT) {
2161 LIR_Opr tmp = new_pointer_register();
2162 __ convert(Bytecodes::_i2l, index_op, tmp);
2163 index_op = tmp;
2164 }
2165 #endif
2167 LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2168 __ move(value.result(), addr);
2169 }
2172 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2173 BasicType type = x->basic_type();
2174 LIRItem src(x->object(), this);
2175 LIRItem off(x->offset(), this);
2177 off.load_item();
2178 src.load_item();
2180 LIR_Opr value = rlock_result(x, x->basic_type());
2182 get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2184 #ifndef SERIALGC
2185 // We might be reading the value of the referent field of a
2186 // Reference object in order to attach it back to the live
2187 // object graph. If G1 is enabled then we need to record
2188 // the value that is being returned in an SATB log buffer.
2189 //
2190 // We need to generate code similar to the following...
2191 //
2192 // if (offset == java_lang_ref_Reference::referent_offset) {
2193 // if (src != NULL) {
2194 // if (klass(src)->reference_type() != REF_NONE) {
2195 // pre_barrier(..., value, ...);
2196 // }
2197 // }
2198 // }
2200 if (UseG1GC && type == T_OBJECT) {
2201 bool gen_pre_barrier = true; // Assume we need to generate pre_barrier.
2202 bool gen_offset_check = true; // Assume we need to generate the offset guard.
2203 bool gen_source_check = true; // Assume we need to check the src object for null.
2204 bool gen_type_check = true; // Assume we need to check the reference_type.
2206 if (off.is_constant()) {
2207 jlong off_con = (off.type()->is_int() ?
2208 (jlong) off.get_jint_constant() :
2209 off.get_jlong_constant());
2212 if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2213 // The constant offset is something other than referent_offset.
2214 // We can skip generating/checking the remaining guards and
2215 // skip generation of the code stub.
2216 gen_pre_barrier = false;
2217 } else {
2218 // The constant offset is the same as referent_offset -
2219 // we do not need to generate a runtime offset check.
2220 gen_offset_check = false;
2221 }
2222 }
2224 // We don't need to generate the stub if the source object is an array
2225 if (gen_pre_barrier && src.type()->is_array()) {
2226 gen_pre_barrier = false;
2227 }
2229 if (gen_pre_barrier) {
2230 // We still need to continue with the checks.
2231 if (src.is_constant()) {
2232 ciObject* src_con = src.get_jobject_constant();
2234 if (src_con->is_null_object()) {
2235 // The constant src object is null - We can skip
2236 // generating the code stub.
2237 gen_pre_barrier = false;
2238 } else {
2239 // Non-null constant source object. We still have to generate
2240 // the slow stub - but we don't need to generate the runtime
2241 // null object check.
2242 gen_source_check = false;
2243 }
2244 }
2245 }
2246 if (gen_pre_barrier && !PatchALot) {
2247 // Can the klass of object be statically determined to be
2248 // a sub-class of Reference?
2249 ciType* type = src.value()->declared_type();
2250 if ((type != NULL) && type->is_loaded()) {
2251 if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
2252 gen_type_check = false;
2253 } else if (type->is_klass() &&
2254 !compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
2255 // Not Reference and not Object klass.
2256 gen_pre_barrier = false;
2257 }
2258 }
2259 }
2261 if (gen_pre_barrier) {
2262 LabelObj* Lcont = new LabelObj();
2264 // We may have to generate more than one runtime check here. Let's start with
2265 // the offset check.
2266 if (gen_offset_check) {
2267 // if (offset != referent_offset) -> continue
2268 // If offset is an int then we can do the comparison with the
2269 // referent_offset constant; otherwise we need to move
2270 // referent_offset into a temporary register and generate
2271 // a reg-reg compare.
2273 LIR_Opr referent_off;
2275 if (off.type()->is_int()) {
2276 referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
2277 } else {
2278 assert(off.type()->is_long(), "what else?");
2279 referent_off = new_register(T_LONG);
2280 __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
2281 }
2282 __ cmp(lir_cond_notEqual, off.result(), referent_off);
2283 __ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
2284 }
2285 if (gen_source_check) {
2286 // offset is a const and equals referent offset
2287 // if (source == null) -> continue
2288 __ cmp(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL));
2289 __ branch(lir_cond_equal, T_OBJECT, Lcont->label());
2290 }
2291 LIR_Opr src_klass = new_register(T_OBJECT);
2292 if (gen_type_check) {
2293 // We have determined that offset == referent_offset && src != null.
2294 // if (src->_klass->_reference_type == REF_NONE) -> continue
2295 __ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), src_klass);
2296 LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
2297 LIR_Opr reference_type = new_register(T_INT);
2298 __ move(reference_type_addr, reference_type);
2299 __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
2300 __ branch(lir_cond_equal, T_INT, Lcont->label());
2301 }
2302 {
2303 // We have determined that src->_klass->_reference_type != REF_NONE
2304 // so register the value in the referent field with the pre-barrier.
2305 pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
2306 value /* pre_val */,
2307 false /* do_load */,
2308 false /* patch */,
2309 NULL /* info */);
2310 }
2311 __ branch_destination(Lcont->label());
2312 }
2313 }
2314 #endif // SERIALGC
2316 if (x->is_volatile() && os::is_MP()) __ membar_acquire();
2317 }
2320 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2321 BasicType type = x->basic_type();
2322 LIRItem src(x->object(), this);
2323 LIRItem off(x->offset(), this);
2324 LIRItem data(x->value(), this);
2326 src.load_item();
2327 if (type == T_BOOLEAN || type == T_BYTE) {
2328 data.load_byte_item();
2329 } else {
2330 data.load_item();
2331 }
2332 off.load_item();
2334 set_no_result(x);
2336 if (x->is_volatile() && os::is_MP()) __ membar_release();
2337 put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
2338 if (x->is_volatile() && os::is_MP()) __ membar();
2339 }
2342 void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
2343 LIRItem src(x->object(), this);
2344 LIRItem off(x->offset(), this);
2346 src.load_item();
2347 if (off.is_constant() && can_inline_as_constant(x->offset())) {
2348 // let it be a constant
2349 off.dont_load_item();
2350 } else {
2351 off.load_item();
2352 }
2354 set_no_result(x);
2356 LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
2357 __ prefetch(addr, is_store);
2358 }
2361 void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
2362 do_UnsafePrefetch(x, false);
2363 }
2366 void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
2367 do_UnsafePrefetch(x, true);
2368 }
2371 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2372 int lng = x->length();
2374 for (int i = 0; i < lng; i++) {
2375 SwitchRange* one_range = x->at(i);
2376 int low_key = one_range->low_key();
2377 int high_key = one_range->high_key();
2378 BlockBegin* dest = one_range->sux();
2379 if (low_key == high_key) {
2380 __ cmp(lir_cond_equal, value, low_key);
2381 __ branch(lir_cond_equal, T_INT, dest);
2382 } else if (high_key - low_key == 1) {
2383 __ cmp(lir_cond_equal, value, low_key);
2384 __ branch(lir_cond_equal, T_INT, dest);
2385 __ cmp(lir_cond_equal, value, high_key);
2386 __ branch(lir_cond_equal, T_INT, dest);
2387 } else {
2388 LabelObj* L = new LabelObj();
2389 __ cmp(lir_cond_less, value, low_key);
2390 __ branch(lir_cond_less, T_INT, L->label());
2391 __ cmp(lir_cond_lessEqual, value, high_key);
2392 __ branch(lir_cond_lessEqual, T_INT, dest);
2393 __ branch_destination(L->label());
2394 }
2395 }
2396 __ jump(default_sux);
2397 }
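// For a range [low_key, high_key] spanning more than two keys, the loop
// above emits the two-compare sequence:
//
//   cmp  value, low_key;   branch(less, L)          // below range -> try next
//   cmp  value, high_key;  branch(lessEqual, dest)  // low <= value <= high
//   L:
//
// so each range costs at most two compares regardless of its width.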
2400 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2401 SwitchRangeList* res = new SwitchRangeList();
2402 int len = x->length();
2403 if (len > 0) {
2404 BlockBegin* sux = x->sux_at(0);
2405 int key = x->lo_key();
2406 BlockBegin* default_sux = x->default_sux();
2407 SwitchRange* range = new SwitchRange(key, sux);
2408 for (int i = 0; i < len; i++, key++) {
2409 BlockBegin* new_sux = x->sux_at(i);
2410 if (sux == new_sux) {
2411 // still in same range
2412 range->set_high_key(key);
2413 } else {
2414 // skip tests which explicitly dispatch to the default
2415 if (sux != default_sux) {
2416 res->append(range);
2417 }
2418 range = new SwitchRange(key, new_sux);
2419 }
2420 sux = new_sux;
2421 }
2422 if (res->length() == 0 || res->last() != range) res->append(range);
2423 }
2424 return res;
2425 }
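// A hypothetical example: a tableswitch with lo_key == 0 and successors
// {A, A, B, default, A} folds into the ranges [0,1]->A, [2,2]->B and
// [4,4]->A; the range that dispatches to the default successor is dropped,
// since do_SwitchRanges falls through to a jump to default_sux anyway.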
2428 // we expect the keys to be sorted by increasing value
2429 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2430 SwitchRangeList* res = new SwitchRangeList();
2431 int len = x->length();
2432 if (len > 0) {
2433 BlockBegin* default_sux = x->default_sux();
2434 int key = x->key_at(0);
2435 BlockBegin* sux = x->sux_at(0);
2436 SwitchRange* range = new SwitchRange(key, sux);
2437 for (int i = 1; i < len; i++) {
2438 int new_key = x->key_at(i);
2439 BlockBegin* new_sux = x->sux_at(i);
2440 if (key+1 == new_key && sux == new_sux) {
2441 // still in same range
2442 range->set_high_key(new_key);
2443 } else {
2444 // skip tests which explicitly dispatch to the default
2445 if (range->sux() != default_sux) {
2446 res->append(range);
2447 }
2448 range = new SwitchRange(new_key, new_sux);
2449 }
2450 key = new_key;
2451 sux = new_sux;
2452 }
2453 if (res->length() == 0 || res->last() != range) res->append(range);
2454 }
2455 return res;
2456 }
2459 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2460 LIRItem tag(x->tag(), this);
2461 tag.load_item();
2462 set_no_result(x);
2464 if (x->is_safepoint()) {
2465 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2466 }
2468 // move values into phi locations
2469 move_to_phi(x->state());
2471 int lo_key = x->lo_key();
2472 int hi_key = x->hi_key();
2473 int len = x->length();
2474 LIR_Opr value = tag.result();
2475 if (UseTableRanges) {
2476 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2477 } else {
2478 for (int i = 0; i < len; i++) {
2479 __ cmp(lir_cond_equal, value, i + lo_key);
2480 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2481 }
2482 __ jump(x->default_sux());
2483 }
2484 }
2487 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2488 LIRItem tag(x->tag(), this);
2489 tag.load_item();
2490 set_no_result(x);
2492 if (x->is_safepoint()) {
2493 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2494 }
2496 // move values into phi locations
2497 move_to_phi(x->state());
2499 LIR_Opr value = tag.result();
2500 if (UseTableRanges) {
2501 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2502 } else {
2503 int len = x->length();
2504 for (int i = 0; i < len; i++) {
2505 __ cmp(lir_cond_equal, value, x->key_at(i));
2506 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2507 }
2508 __ jump(x->default_sux());
2509 }
2510 }
2513 void LIRGenerator::do_Goto(Goto* x) {
2514 set_no_result(x);
2516 if (block()->next()->as_OsrEntry()) {
2517 // need to free up storage used for OSR entry point
2518 LIR_Opr osrBuffer = block()->next()->operand();
2519 BasicTypeList signature;
2520 signature.append(T_INT);
2521 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2522 __ move(osrBuffer, cc->args()->at(0));
2523 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2524 getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2525 }
2527 if (x->is_safepoint()) {
2528 ValueStack* state = x->state_before() ? x->state_before() : x->state();
2530 // increment backedge counter if needed
2531 CodeEmitInfo* info = state_for(x, state);
2532 increment_backedge_counter(info, x->profiled_bci());
2533 CodeEmitInfo* safepoint_info = state_for(x, state);
2534 __ safepoint(safepoint_poll_register(), safepoint_info);
2535 }
2537 // Gotos can be folded Ifs, handle this case.
2538 if (x->should_profile()) {
2539 ciMethod* method = x->profiled_method();
2540 assert(method != NULL, "method should be set if branch is profiled");
2541 ciMethodData* md = method->method_data_or_null();
2542 assert(md != NULL, "Sanity");
2543 ciProfileData* data = md->bci_to_data(x->profiled_bci());
2544 assert(data != NULL, "must have profiling data");
2545 int offset;
2546 if (x->direction() == Goto::taken) {
2547 assert(data->is_BranchData(), "need BranchData for two-way branches");
2548 offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2549 } else if (x->direction() == Goto::not_taken) {
2550 assert(data->is_BranchData(), "need BranchData for two-way branches");
2551 offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2552 } else {
2553 assert(data->is_JumpData(), "need JumpData for branches");
2554 offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2555 }
2556 LIR_Opr md_reg = new_register(T_OBJECT);
2557 __ oop2reg(md->constant_encoding(), md_reg);
2559 increment_counter(new LIR_Address(md_reg, offset,
2560 NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2561 }
2563 // emit phi-instruction move after safepoint since this simplifies
2564 // describing the state at the safepoint.
2565 move_to_phi(x->state());
2567 __ jump(x->default_sux());
2568 }
2571 void LIRGenerator::do_Base(Base* x) {
2572 __ std_entry(LIR_OprFact::illegalOpr);
2573 // Emit moves from physical registers / stack slots to virtual registers
2574 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2575 IRScope* irScope = compilation()->hir()->top_scope();
2576 int java_index = 0;
2577 for (int i = 0; i < args->length(); i++) {
2578 LIR_Opr src = args->at(i);
2579 assert(!src->is_illegal(), "check");
2580 BasicType t = src->type();
2582 // Types which are smaller than int are passed as int, so
2583 // correct the type that was passed.
2584 switch (t) {
2585 case T_BYTE:
2586 case T_BOOLEAN:
2587 case T_SHORT:
2588 case T_CHAR:
2589 t = T_INT;
2590 break;
2591 }
2593 LIR_Opr dest = new_register(t);
2594 __ move(src, dest);
2596 // Assign new location to Local instruction for this local
2597 Local* local = x->state()->local_at(java_index)->as_Local();
2598 assert(local != NULL, "Locals for incoming arguments must have been created");
2599 #ifndef __SOFTFP__
2600 // The java calling convention passes double as long and float as int.
2601 assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2602 #endif // __SOFTFP__
2603 local->set_operand(dest);
2604 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
2605 java_index += type2size[t];
2606 }
2608 if (compilation()->env()->dtrace_method_probes()) {
2609 BasicTypeList signature;
2610 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
2611 signature.append(T_OBJECT); // Method*
2612 LIR_OprList* args = new LIR_OprList();
2613 args->append(getThreadPointer());
2614 LIR_Opr meth = new_register(T_OBJECT);
2615 __ oop2reg(method()->constant_encoding(), meth);
2616 args->append(meth);
2617 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2618 }
2620 if (method()->is_synchronized()) {
2621 LIR_Opr obj;
2622 if (method()->is_static()) {
2623 obj = new_register(T_OBJECT);
2624 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2625 } else {
2626 Local* receiver = x->state()->local_at(0)->as_Local();
2627 assert(receiver != NULL, "must already exist");
2628 obj = receiver->operand();
2629 }
2630 assert(obj->is_valid(), "must be valid");
2632 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2633 LIR_Opr lock = new_register(T_INT);
2634 __ load_stack_address_monitor(0, lock);
2636 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
2637 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2639 // receiver is guaranteed non-NULL so don't need CodeEmitInfo
2640 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2641 }
2642 }
2644 // increment invocation counters if needed
2645 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2646 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
2647 increment_invocation_counter(info);
2648 }
2650 // all blocks with a successor must end with an unconditional jump
2651 // to the successor even if they are consecutive
2652 __ jump(x->default_sux());
2653 }
2656 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2657 // construct our frame and model the production of the incoming pointer
2658 // to the OSR buffer.
2659 __ osr_entry(LIR_Assembler::osrBufferPointer());
2660 LIR_Opr result = rlock_result(x);
2661 __ move(LIR_Assembler::osrBufferPointer(), result);
2662 }
2665 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2666 assert(args->length() == arg_list->length(),
2667 err_msg_res("args=%d, arg_list=%d", args->length(), arg_list->length()));
2668 for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2669 LIRItem* param = args->at(i);
2670 LIR_Opr loc = arg_list->at(i);
2671 if (loc->is_register()) {
2672 param->load_item_force(loc);
2673 } else {
2674 LIR_Address* addr = loc->as_address_ptr();
2675 param->load_for_store(addr->type());
2676 if (addr->type() == T_OBJECT) {
2677 __ move_wide(param->result(), addr);
2678 } else
2679 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2680 __ unaligned_move(param->result(), addr);
2681 } else {
2682 __ move(param->result(), addr);
2683 }
2684 }
2685 }
2687 if (x->has_receiver()) {
2688 LIRItem* receiver = args->at(0);
2689 LIR_Opr loc = arg_list->at(0);
2690 if (loc->is_register()) {
2691 receiver->load_item_force(loc);
2692 } else {
2693 assert(loc->is_address(), "just checking");
2694 receiver->load_for_store(T_OBJECT);
2695 __ move_wide(receiver->result(), loc->as_address_ptr());
2696 }
2697 }
2698 }
2701 // Visits all arguments, returns appropriate items without loading them
2702 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2703 LIRItemList* argument_items = new LIRItemList();
2704 if (x->has_receiver()) {
2705 LIRItem* receiver = new LIRItem(x->receiver(), this);
2706 argument_items->append(receiver);
2707 }
2708 for (int i = 0; i < x->number_of_arguments(); i++) {
2709 LIRItem* param = new LIRItem(x->argument_at(i), this);
2710 argument_items->append(param);
2711 }
2712 return argument_items;
2713 }
2716 // The invoke with receiver has the following phases:
2717 // a) traverse and load/lock receiver;
2718 // b) traverse all arguments -> item-array (invoke_visit_argument)
2719 // c) push receiver on stack
2720 // d) load each of the items and push on stack
2721 // e) unlock receiver
2722 // f) move receiver into receiver-register %o0
2723 // g) lock result registers and emit call operation
2724 //
2725 // Before issuing a call, we must spill-save all values on the stack
2726 // that are in caller-save registers. "spill-save" moves those registers
2727 // either into a free callee-save register or spills them if no free
2728 // callee-save register is available.
2729 //
2730 // The problem is where to invoke spill-save.
2731 // - if invoked between e) and f), we may lock a callee-save
2732 // register in "spill-save" that destroys the receiver register
2733 // before f) is executed
2734 // - if we rearrange f) to be earlier, by loading %o0, it
2735 // may destroy a value on the stack that is currently in %o0
2736 // and is waiting to be spilled
2737 // - if we keep the receiver locked while doing spill-save,
2738 // we cannot spill it as it is spill-locked
2739 //
2740 void LIRGenerator::do_Invoke(Invoke* x) {
2741 CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2743 LIR_OprList* arg_list = cc->args();
2744 LIRItemList* args = invoke_visit_arguments(x);
2745 LIR_Opr receiver = LIR_OprFact::illegalOpr;
2747 // setup result register
2748 LIR_Opr result_register = LIR_OprFact::illegalOpr;
2749 if (x->type() != voidType) {
2750 result_register = result_register_for(x->type());
2751 }
2753 CodeEmitInfo* info = state_for(x, x->state());
2755 invoke_load_arguments(x, args, arg_list);
2757 if (x->has_receiver()) {
2758 args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2759 receiver = args->at(0)->result();
2760 }
2762 // emit invoke code
2763 bool optimized = x->target_is_loaded() && x->target_is_final();
2764 assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2766 // JSR 292
2767 // Preserve the SP over MethodHandle call sites.
2768 ciMethod* target = x->target();
2769 bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
2770 target->is_method_handle_intrinsic() ||
2771 target->is_compiled_lambda_form());
2772 if (is_method_handle_invoke) {
2773 info->set_is_method_handle_invoke(true);
2774 __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
2775 }
2777 switch (x->code()) {
2778 case Bytecodes::_invokestatic:
2779 __ call_static(target, result_register,
2780 SharedRuntime::get_resolve_static_call_stub(),
2781 arg_list, info);
2782 break;
2783 case Bytecodes::_invokespecial:
2784 case Bytecodes::_invokevirtual:
2785 case Bytecodes::_invokeinterface:
2786 // for a final target we still produce an inline cache, in order
2787 // to be able to call in mixed mode
2788 if (x->code() == Bytecodes::_invokespecial || optimized) {
2789 __ call_opt_virtual(target, receiver, result_register,
2790 SharedRuntime::get_resolve_opt_virtual_call_stub(),
2791 arg_list, info);
2792 } else if (x->vtable_index() < 0) {
2793 __ call_icvirtual(target, receiver, result_register,
2794 SharedRuntime::get_resolve_virtual_call_stub(),
2795 arg_list, info);
2796 } else {
2797 int entry_offset = InstanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2798 int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2799 __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2800 }
2801 break;
2802 case Bytecodes::_invokedynamic: {
2803 __ call_dynamic(target, receiver, result_register,
2804 SharedRuntime::get_resolve_static_call_stub(),
2805 arg_list, info);
2806 break;
2807 }
2808 default:
2809 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
2810 break;
2811 }
2813 // JSR 292
2814 // Restore the SP after MethodHandle call sites.
2815 if (is_method_handle_invoke) {
2816 __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
2817 }
2819 if (x->type()->is_float() || x->type()->is_double()) {
2820 // Force rounding of results from non-strictfp when in strictfp
2821 // scope (or when we don't know the strictness of the callee, to
2822 // be safe.)
2823 if (method()->is_strict()) {
2824 if (!x->target_is_loaded() || !x->target_is_strictfp()) {
2825 result_register = round_item(result_register);
2826 }
2827 }
2828 }
2830 if (result_register->is_valid()) {
2831 LIR_Opr result = rlock_result(x);
2832 __ move(result_register, result);
2833 }
2834 }
2837 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
2838 assert(x->number_of_arguments() == 1, "wrong type");
2839 LIRItem value (x->argument_at(0), this);
2840 LIR_Opr reg = rlock_result(x);
2841 value.load_item();
2842 LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
2843 __ move(tmp, reg);
2844 }
2848 // Code for : x->x() {x->cond()} x->y() ? x->tval() : x->fval()
2849 void LIRGenerator::do_IfOp(IfOp* x) {
2850 #ifdef ASSERT
2851 {
2852 ValueTag xtag = x->x()->type()->tag();
2853 ValueTag ttag = x->tval()->type()->tag();
2854 assert(xtag == intTag || xtag == objectTag, "cannot handle others");
2855 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
2856 assert(ttag == x->fval()->type()->tag(), "cannot handle others");
2857 }
2858 #endif
2860 LIRItem left(x->x(), this);
2861 LIRItem right(x->y(), this);
2862 left.load_item();
2863 if (can_inline_as_constant(right.value())) {
2864 right.dont_load_item();
2865 } else {
2866 right.load_item();
2867 }
2869 LIRItem t_val(x->tval(), this);
2870 LIRItem f_val(x->fval(), this);
2871 t_val.dont_load_item();
2872 f_val.dont_load_item();
2873 LIR_Opr reg = rlock_result(x);
2875 __ cmp(lir_cond(x->cond()), left.result(), right.result());
2876 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
2877 }
2879 void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
2880 assert(x->number_of_arguments() == expected_arguments, "wrong type");
2881 LIR_Opr reg = result_register_for(x->type());
2882 __ call_runtime_leaf(routine, getThreadTemp(),
2883 reg, new LIR_OprList());
2884 LIR_Opr result = rlock_result(x);
2885 __ move(reg, result);
2886 }
2888 #ifdef TRACE_HAVE_INTRINSICS
2889 void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
2890 LIR_Opr thread = getThreadPointer();
2891 LIR_Opr osthread = new_pointer_register();
2892 __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
2893 size_t thread_id_size = OSThread::thread_id_size();
2894 if (thread_id_size == (size_t) BytesPerLong) {
2895 LIR_Opr id = new_register(T_LONG);
2896 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id);
2897 __ convert(Bytecodes::_l2i, id, rlock_result(x));
2898 } else if (thread_id_size == (size_t) BytesPerInt) {
2899 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x));
2900 } else {
2901 ShouldNotReachHere();
2902 }
2903 }
2905 void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
2906 CodeEmitInfo* info = state_for(x);
2907 CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
2908 BasicType klass_pointer_type = NOT_LP64(T_INT) LP64_ONLY(T_LONG);
2909 assert(info != NULL, "must have info");
2910 LIRItem arg(x->argument_at(1), this);
2911 arg.load_item();
2912 LIR_Opr klass = new_pointer_register();
2913 __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), klass_pointer_type), klass, info);
2914 LIR_Opr id = new_register(T_LONG);
2915 ByteSize offset = TRACE_ID_OFFSET;
2916 LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
2917 __ move(trace_id_addr, id);
2918 __ logical_or(id, LIR_OprFact::longConst(0x01l), id);
2919 __ store(id, trace_id_addr);
2920 __ logical_and(id, LIR_OprFact::longConst(~0x3l), id);
2921 __ move(id, rlock_result(x));
2922 }
2923 #endif
2925 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
2926 switch (x->id()) {
2927 case vmIntrinsics::_intBitsToFloat :
2928 case vmIntrinsics::_doubleToRawLongBits :
2929 case vmIntrinsics::_longBitsToDouble :
2930 case vmIntrinsics::_floatToRawIntBits : {
2931 do_FPIntrinsics(x);
2932 break;
2933 }
2935 #ifdef TRACE_HAVE_INTRINSICS
2936 case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break;
2937 case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break;
2938 case vmIntrinsics::_counterTime:
2939 do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), 0, x);
2940 break;
2941 #endif
2943 case vmIntrinsics::_currentTimeMillis:
2944 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), 0, x);
2945 break;
2947 case vmIntrinsics::_nanoTime:
2948 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), 0, x);
2949 break;
2951 case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break;
2952 case vmIntrinsics::_isInstance: do_isInstance(x); break;
2953 case vmIntrinsics::_getClass: do_getClass(x); break;
2954 case vmIntrinsics::_currentThread: do_currentThread(x); break;
2956 case vmIntrinsics::_dlog: // fall through
2957 case vmIntrinsics::_dlog10: // fall through
2958 case vmIntrinsics::_dabs: // fall through
2959 case vmIntrinsics::_dsqrt: // fall through
2960 case vmIntrinsics::_dtan: // fall through
2961 case vmIntrinsics::_dsin : // fall through
2962 case vmIntrinsics::_dcos : // fall through
2963 case vmIntrinsics::_dexp : // fall through
2964 case vmIntrinsics::_dpow : do_MathIntrinsic(x); break;
2965 case vmIntrinsics::_arraycopy: do_ArrayCopy(x); break;
2967 // java.nio.Buffer.checkIndex
2968 case vmIntrinsics::_checkIndex: do_NIOCheckIndex(x); break;
2970 case vmIntrinsics::_compareAndSwapObject:
2971 do_CompareAndSwap(x, objectType);
2972 break;
2973 case vmIntrinsics::_compareAndSwapInt:
2974 do_CompareAndSwap(x, intType);
2975 break;
2976 case vmIntrinsics::_compareAndSwapLong:
2977 do_CompareAndSwap(x, longType);
2978 break;
2980 case vmIntrinsics::_Reference_get:
2981 do_Reference_get(x);
2982 break;
2984 default: ShouldNotReachHere(); break;
2985 }
2986 }
2988 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
2989 // Need recv in a temporary register so it interferes with the other temporaries
2990 LIR_Opr recv = LIR_OprFact::illegalOpr;
2991 LIR_Opr mdo = new_register(T_OBJECT);
2992 // tmp is used to hold the counters on SPARC
2993 LIR_Opr tmp = new_pointer_register();
2994 if (x->recv() != NULL) {
2995 LIRItem value(x->recv(), this);
2996 value.load_item();
2997 recv = new_register(T_OBJECT);
2998 __ move(value.result(), recv);
2999 }
3000 __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3001 }
3003 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3004 // We can safely ignore accessors here, since c2 will inline them anyway;
3005 // accessors are also always mature.
3006 if (!x->inlinee()->is_accessor()) {
3007 CodeEmitInfo* info = state_for(x, x->state(), true);
3008 // Notify the runtime very infrequently only to take care of counter overflows
3009 increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true);
3010 }
3011 }
3013 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
3014 int freq_log;
3015 int level = compilation()->env()->comp_level();
3016 if (level == CompLevel_limited_profile) {
3017 freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3018 } else if (level == CompLevel_full_profile) {
3019 freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3020 } else {
3021 ShouldNotReachHere();
3022 }
3023 // Increment the appropriate invocation/backedge counter and notify the runtime.
3024 increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
3025 }
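// A worked example of the masking below, assuming freq_log == 10 and that
// each event advances the count field by one: frequency == (1 << 10) - 1
// == 1023, so the logical_and keeps only the low ten bits of the count
// field; the compare against zero then succeeds once every 1024 events,
// and only then is the branch into the CounterOverflowStub taken.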
3027 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3028 ciMethod *method, int frequency,
3029 int bci, bool backedge, bool notify) {
3030 assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^x - 1 or 0");
3031 int level = _compilation->env()->comp_level();
3032 assert(level > CompLevel_simple, "Shouldn't be here");
3034 int offset = -1;
3035 LIR_Opr counter_holder = new_register(T_OBJECT);
3036 LIR_Opr meth;
3037 if (level == CompLevel_limited_profile) {
3038 offset = in_bytes(backedge ? Method::backedge_counter_offset() :
3039 Method::invocation_counter_offset());
3040 __ oop2reg(method->constant_encoding(), counter_holder);
3041 meth = counter_holder;
3042 } else if (level == CompLevel_full_profile) {
3043 offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
3044 MethodData::invocation_counter_offset());
3045 ciMethodData* md = method->method_data_or_null();
3046 assert(md != NULL, "Sanity");
3047 __ oop2reg(md->constant_encoding(), counter_holder);
3048 meth = new_register(T_OBJECT);
3049 __ oop2reg(method->constant_encoding(), meth);
3050 } else {
3051 ShouldNotReachHere();
3052 }
3053 LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3054 LIR_Opr result = new_register(T_INT);
3055 __ load(counter, result);
3056 __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
3057 __ store(result, counter);
3058 if (notify) {
3059 LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
3060 __ logical_and(result, mask, result);
3061 __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3062 // The bci for the info can point at the cmp of an if; we want the if's bci
3063 CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3064 __ branch(lir_cond_equal, T_INT, overflow);
3065 __ branch_destination(overflow->continuation());
3066 }
3067 }
3069 void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3070 LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3071 BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3073 if (x->pass_thread()) {
3074 signature->append(T_ADDRESS);
3075 args->append(getThreadPointer());
3076 }
3078 for (int i = 0; i < x->number_of_arguments(); i++) {
3079 Value a = x->argument_at(i);
3080 LIRItem* item = new LIRItem(a, this);
3081 item->load_item();
3082 args->append(item->result());
3083 signature->append(as_BasicType(a->type()));
3084 }
3086 LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
3087 if (x->type() == voidType) {
3088 set_no_result(x);
3089 } else {
3090 __ move(result, rlock_result(x));
3091 }
3092 }
3094 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
3095 LIRItemList args(1);
3096 LIRItem value(arg1, this);
3097 args.append(&value);
3098 BasicTypeList signature;
3099 signature.append(as_BasicType(arg1->type()));
3101 return call_runtime(&signature, &args, entry, result_type, info);
3102 }
3105 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
3106 LIRItemList args(2);
3107 LIRItem value1(arg1, this);
3108 LIRItem value2(arg2, this);
3109 args.append(&value1);
3110 args.append(&value2);
3111 BasicTypeList signature;
3112 signature.append(as_BasicType(arg1->type()));
3113 signature.append(as_BasicType(arg2->type()));
3115 return call_runtime(&signature, &args, entry, result_type, info);
3116 }
3119 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
3120 address entry, ValueType* result_type, CodeEmitInfo* info) {
3121 // get a result register
3122 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3123 LIR_Opr result = LIR_OprFact::illegalOpr;
3124 if (result_type->tag() != voidTag) {
3125 result = new_register(result_type);
3126 phys_reg = result_register_for(result_type);
3127 }
3129 // move the arguments into the correct location
3130 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3131 assert(cc->length() == args->length(), "argument mismatch");
3132 for (int i = 0; i < args->length(); i++) {
3133 LIR_Opr arg = args->at(i);
3134 LIR_Opr loc = cc->at(i);
3135 if (loc->is_register()) {
3136 __ move(arg, loc);
3137 } else {
3138 LIR_Address* addr = loc->as_address_ptr();
3139 // if (!can_store_as_constant(arg)) {
3140 // LIR_Opr tmp = new_register(arg->type());
3141 // __ move(arg, tmp);
3142 // arg = tmp;
3143 // }
3144 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3145 __ unaligned_move(arg, addr);
3146 } else {
3147 __ move(arg, addr);
3148 }
3149 }
3150 }
3152 if (info) {
3153 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3154 } else {
3155 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3156 }
3157 if (result->is_valid()) {
3158 __ move(phys_reg, result);
3159 }
3160 return result;
3161 }
3164 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
3165 address entry, ValueType* result_type, CodeEmitInfo* info) {
3166 // get a result register
3167 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3168 LIR_Opr result = LIR_OprFact::illegalOpr;
3169 if (result_type->tag() != voidTag) {
3170 result = new_register(result_type);
3171 phys_reg = result_register_for(result_type);
3172 }
3174 // move the arguments into the correct location
3175 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3177 assert(cc->length() == args->length(), "argument mismatch");
3178 for (int i = 0; i < args->length(); i++) {
3179 LIRItem* arg = args->at(i);
3180 LIR_Opr loc = cc->at(i);
3181 if (loc->is_register()) {
3182 arg->load_item_force(loc);
3183 } else {
3184 LIR_Address* addr = loc->as_address_ptr();
3185 arg->load_for_store(addr->type());
3186 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3187 __ unaligned_move(arg->result(), addr);
3188 } else {
3189 __ move(arg->result(), addr);
3190 }
3191 }
3192 }
3194 if (info) {
3195 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3196 } else {
3197 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3198 }
3199 if (result->is_valid()) {
3200 __ move(phys_reg, result);
3201 }
3202 return result;
3203 }
3205 void LIRGenerator::do_MemBar(MemBar* x) {
3206 if (os::is_MP()) {
3207 LIR_Code code = x->code();
3208 switch(code) {
3209 case lir_membar_acquire : __ membar_acquire(); break;
3210 case lir_membar_release : __ membar_release(); break;
3211 case lir_membar : __ membar(); break;
3212 case lir_membar_loadload : __ membar_loadload(); break;
3213 case lir_membar_storestore: __ membar_storestore(); break;
3214 case lir_membar_loadstore : __ membar_loadstore(); break;
3215 case lir_membar_storeload : __ membar_storeload(); break;
3216 default : ShouldNotReachHere(); break;
3217 }
3218 }
3219 }