Wed, 31 Jan 2018 19:24:57 -0500
8189170: Add option to disable stack overflow checking in primordial thread for use with JNI_CreateJavaJVM
Reviewed-by: dcubed
/*
 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

#ifndef PATCHED_ADDR
#define PATCHED_ADDR (max_jint)
#endif

void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}

//--------------------------------------------------------------
// PhiResolver
//
// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
//
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3
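//
// An illustrative extension (editor's sketch, not part of the original
// comment): a three-register cycle is resolved the same way, still with a
// single temp:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r3           r1 := r2
//  r3 := r1           r2 := r3
//                     r3 := temp
//
// One temp always suffices because at most one cycle can be open per
// traversal from a start node (see the assert in PhiResolver::move()).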

PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}

void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}

// Traverse the assignment graph in depth-first order and generate moves in post order,
// i.e., for the two assignments  b := c, a := b  start with node c:
//   Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
//   Generates moves in this order: move b to a, then move c to b.
// For the cycle  a := b, b := a  start with node a:
//   Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
//   Generates moves in this order: move b to temp, move a to b, move temp to a.
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations() - 1; i >= 0; i--) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}

PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i--) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i--) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j--) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}

ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}

void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}

//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}
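
// Editorial note (a reading of the code below, not an original comment):
// load_item() deliberately does not call set_result() for constants. The
// Value keeps its constant operand, so other uses can still re-materialize
// the constant, while only this LIRItem refers to the loaded register.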
void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}

void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}


void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}


ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}

jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}

//--------------------------------------------------------------

void LIRGenerator::init() {
  _bs = Universe::heap()->barrier_set();
}

void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}

void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Oprs for unpinned constants shouldn't be referenced by other
  // blocks, so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}

void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}

//-------------------------LIRGenerator-----------------------------

// This is where the tree walk starts; instr must be a root.
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}

// This is called for each node in the tree; the walk stops if a root is reached.
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when we encounter a root
  if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}

CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != NULL, "state must be defined");

#ifndef PRODUCT
  state->verify();
#endif

  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
}

CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}


void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
  /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if TieredCompilation
   * is active and the class hasn't yet been resolved we need to emit a patch that resolves
   * the class. */
  if ((TieredCompilation && need_resolve) || !obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ klass2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ metadata2reg(obj->constant_encoding(), r);
  }
}

void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
}


void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
  __ move(index, result);
}

void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op = left;
  LIR_Opr right_op = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch (code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd: __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul: __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          int c = right->as_jint();
          if (is_power_of_2(c)) {
            // do not need tmp here
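            // e.g. c == 8 emits left_op << 3 instead of a multiply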
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div(left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        if (is_strictfp) {
          __ div_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}

void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}

void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch (code) {
    case Bytecodes::_ishl:
    case Bytecodes::_lshl:  __ shift_left(value, count, result_op, tmp); break;
    case Bytecodes::_ishr:
    case Bytecodes::_lshr:  __ shift_right(value, count, result_op, tmp); break;
    case Bytecodes::_iushr:
    case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
    default: ShouldNotReachHere();
  }
}

void LIRGenerator::logic_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch (code) {
    case Bytecodes::_iand:
    case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:  __ logical_or(left_op, right_op, result_op); break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}

void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}

#ifndef PRODUCT
void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
  if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
    tty->print_cr(" ###class not loaded at new bci %d", new_instance->printable_bci());
  } else if (PrintNotLoaded && (TieredCompilation && new_instance->is_unresolved())) {
    tty->print_cr(" ###class not resolved at new bci %d", new_instance->printable_bci());
  }
}
#endif

void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}

static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

static ciType* phi_declared_type(Phi* phi) {
  ciType* t = phi->operand_at(0)->declared_type();
  if (t == NULL) {
    return NULL;
  }
  for (int i = 1; i < phi->operand_count(); i++) {
    if (t != phi->operand_at(i)->declared_type()) {
      return NULL;
    }
  }
  return t;
}

void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }

    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != NULL) {
    Value length_limit = NULL;

    IfOp* ifop = length->as_IfOp();
    if (ifop != NULL) {
      // look for expressions like min(v, a.length) which ends up as
      //   x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }
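
    // Hypothetical Java source that produces this shape (illustration only,
    // assuming Math.min is inlined into an IfOp by HIR construction):
    //   System.arraycopy(src, 0, dst, 0, Math.min(n, src.length));
    // Here length is the IfOp (n > src.length ? src.length : n) and
    // length_limit becomes src.length, matching the NewArray cases below.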

    // try to skip null checks and range checks
    NewArray* src_array = src->as_NewArray();
    if (src_array != NULL) {
      flags &= ~LIR_OpArrayCopy::src_null_check;
      if (length_limit != NULL &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }

    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != NULL) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != NULL &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    if (src_int->value() >= dst_int->value()) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != NULL) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    // src and dst positions are the same, or dst_pos is zero, so assume
    // a non-overlapping copy.
    flags &= ~LIR_OpArrayCopy::overlapping;
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}

LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}

LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()],
         err_msg_res("size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type())));
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}

void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);

    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}

// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated from Java it is rather rare that more than one
// value is live on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double, ...) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At exit of a basic block, we fill the register(s) and spill slots.
// At entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.
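//
// An illustrative (hypothetical) source shape:
//
//   x = cond ? a : b;   // both arms produce a value and jump to the join
//                       // block, where x becomes a Phi
//
// Each predecessor ends by moving its value into the operand reserved for
// the Phi (see move_to_phi below); the join block then reads that operand.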


// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    LIR_Opr operand = cur_val->operand();
    if (cur_val->operand()->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}

LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically. This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  return LIR_OprFact::virtual_register(vreg, type);
}

// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}

// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
    case T_BYTE:
    case T_BOOLEAN:
      reg = rlock_byte(type);
      break;
    default:
      reg = rlock(x);
      break;
  }

  set_result(x, reg);
  return reg;
}

//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}

void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}


//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}

// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block. After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}

void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}

void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
    signature.append(T_METADATA); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}

// Example: ref.get()
// Combination of LoadField and G1 pre-write barrier
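//
// Informal note (editorial, not an original comment): even though ref.get()
// only reads the referent, the loaded value is fed to the SATB pre-barrier
// below so that a concurrent G1 marking cycle cannot lose the referent when
// the application keeps only the value returned by get().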
void LIRGenerator::do_Reference_get(Intrinsic* x) {

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Address* referent_field_adr =
    new LIR_Address(reference.result(), referent_offset, T_OBJECT);

  LIR_Opr result = rlock_result(x);

  __ load(referent_field_adr, result, info);

  // Register the value in the referent field with the pre-barrier
  pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
              result /* pre_val */,
              false  /* do_load */,
              false  /* patch */,
              NULL   /* info */);
}

// Example: clazz.isInstance(object)
void LIRGenerator::do_isInstance(Intrinsic* x) {
  assert(x->number_of_arguments() == 2, "wrong type");

  // TODO could try to substitute this node with an equivalent InstanceOf
  // if clazz is known to be a constant Class. This will pick up newly found
  // constants after HIR construction. I'll leave this to a future change.

  // as a first cut, make a simple leaf call to runtime to stay platform independent.
  // could follow the aastore example in a future change.

  LIRItem clazz(x->argument_at(0), this);
  LIRItem object(x->argument_at(1), this);
  clazz.load_item();
  object.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform null check on clazz
  if (x->needs_null_check()) {
    CodeEmitInfo* info = state_for(x);
    __ null_check(clazz.result(), info);
  }

  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
                                     x->type(),
                                     NULL); // NULL CodeEmitInfo results in a leaf call
  __ move(call_result, result);
}

// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr temp = new_register(T_METADATA);
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
  // meaning of these two is mixed up (see JDK-8026837).
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
  __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
}

// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}

void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}

//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}

Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}

void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}

// Block-local constant handling. This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers. Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}

// Various barriers

void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                               bool do_load, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      // No pre barriers
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No pre barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_post_barrier(addr, new_val);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr, new_val);
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No post barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                                                     bool do_load, bool patch, CodeEmitInfo* info) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    flag_type = T_BYTE;
  }
  LIR_Opr thrd = getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(JavaThread::satb_mark_queue_offset() +
                             PtrQueue::byte_offset_of_active()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == NULL, "sanity");

    slow = new G1PreBarrierStub(pre_val);
  }

  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}

void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  // If the "new_val" is a constant NULL, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == NULL) return;

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");
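
  // Cross-region test (informal sketch): (addr ^ new_val) >> LogOfHRGrainBytes
  // is zero exactly when addr and new_val lie in the same heap region; only
  // cross-region stores need a remembered-set update via the slow-path stub.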
  LIR_Opr xor_res = new_pointer_register();
  LIR_Opr xor_shift_res = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, xor_res);
    __ logical_xor(xor_res, new_val, xor_res);
    __ move(xor_res, xor_shift_res);
    __ unsigned_shift_right(xor_shift_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  } else {
    __ logical_xor(addr, new_val, xor_res);
    __ unsigned_shift_right(xor_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  }

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    __ leal(new_val, new_val_reg);
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));

  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
  __ branch_destination(slow->continuation());
}

#endif // INCLUDE_ALL_GCS
////////////////////////////////////////////////////////////////////////

void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {

  assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    // ptr cannot be an object because we use this barrier for array card marks
    // and addr can point in the middle of an array.
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");
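
  // Card mark, informally: the card index is addr >> card_shift, and the
  // store below writes the dirty value (0) at that index, i.e.
  //   byte_map_base[addr >> card_shift] = 0;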
#ifdef CARDTABLEMODREF_POST_BARRIER_HELPER
  CardTableModRef_post_barrier_helper(addr, card_table_base);
#else
  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  }
  if (can_inline_as_constant(card_table_base)) {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
  } else {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, load_constant(card_table_base),
                            T_BYTE));
  }
#endif
}
1622 //------------------------field access--------------------------------------
1624 // Comment copied form templateTable_i486.cpp
1625 // ----------------------------------------------------------------------------
1626 // Volatile variables demand their effects be made known to all CPU's in
1627 // order. Store buffers on most chips allow reads & writes to reorder; the
1628 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1629 // memory barrier (i.e., it's not sufficient that the interpreter does not
1630 // reorder volatile references, the hardware also must not reorder them).
1631 //
1632 // According to the new Java Memory Model (JMM):
1633 // (1) All volatiles are serialized wrt to each other.
1634 // ALSO reads & writes act as aquire & release, so:
1635 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1636 // the read float up to before the read. It's OK for non-volatile memory refs
1637 // that happen before the volatile read to float down below it.
1638 // (3) Similar a volatile write cannot let unrelated NON-volatile memory refs
1639 // that happen BEFORE the write float down to after the write. It's OK for
1640 // non-volatile memory refs that happen after the volatile write to float up
1641 // before it.
1642 //
1643 // We only put in barriers around volatile refs (they are expensive), not
1644 // _between_ memory refs (that would require us to track the flavor of the
1645 // previous memory refs). Requirements (2) and (3) require some barriers
1646 // before volatile stores and after volatile loads. These nearly cover
1647 // requirement (1) but miss the volatile-store-volatile-load case. This final
1648 // case is placed after volatile-stores although it could just as well go
1649 // before volatile-loads.
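//
// Schematically, the code emitted below for a volatile field access is
// (a sketch, not the exact LIR; the membars are emitted only when
// os::is_MP() is true):
//
//   volatile store:  membar_release(); store(field);  membar();
//   volatile load:   load(field);      membar_acquire();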
1652 void LIRGenerator::do_StoreField(StoreField* x) {
1653 bool needs_patching = x->needs_patching();
1654 bool is_volatile = x->field()->is_volatile();
1655 BasicType field_type = x->field_type();
1656 bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
1658 CodeEmitInfo* info = NULL;
1659 if (needs_patching) {
1660 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1661 info = state_for(x, x->state_before());
1662 } else if (x->needs_null_check()) {
1663 NullCheck* nc = x->explicit_null_check();
1664 if (nc == NULL) {
1665 info = state_for(x);
1666 } else {
1667 info = state_for(nc);
1668 }
1669 }
1672 LIRItem object(x->obj(), this);
1673 LIRItem value(x->value(), this);
1675 object.load_item();
1677 if (is_volatile || needs_patching) {
1678 // load item if field is volatile (fewer special cases for volatiles)
1679 // load item if field not initialized
1680 // load item if field not constant
1681 // because of code patching we cannot inline constants
1682 if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1683 value.load_byte_item();
1684 } else {
1685 value.load_item();
1686 }
1687 } else {
1688 value.load_for_store(field_type);
1689 }
1691 set_no_result(x);
1693 #ifndef PRODUCT
1694 if (PrintNotLoaded && needs_patching) {
1695 tty->print_cr(" ###class not loaded at store_%s bci %d",
1696 x->is_static() ? "static" : "field", x->printable_bci());
1697 }
1698 #endif
1700 if (x->needs_null_check() &&
1701 (needs_patching ||
1702 MacroAssembler::needs_explicit_null_check(x->offset()))) {
1703 // Emit an explicit null check because the offset is too large.
1704 // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1705 // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1706 __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1707 }
1709 LIR_Address* address;
1710 if (needs_patching) {
1711 // we need to patch the offset in the instruction so don't allow
1712 // generate_address to try to be smart about emitting the -1.
1713 // Otherwise the patching code won't know how to find the
1714 // instruction to patch.
1715 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1716 } else {
1717 address = generate_address(object.result(), x->offset(), field_type);
1718 }
1720 if (is_volatile && os::is_MP()) {
1721 __ membar_release();
1722 }
1724 if (is_oop) {
1725 // Do the pre-write barrier, if any.
1726 pre_barrier(LIR_OprFact::address(address),
1727 LIR_OprFact::illegalOpr /* pre_val */,
1728 true /* do_load*/,
1729 needs_patching,
1730 (info ? new CodeEmitInfo(info) : NULL));
1731 }
1733 if (is_volatile && !needs_patching) {
1734 volatile_field_store(value.result(), address, info);
1735 } else {
1736 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1737 __ store(value.result(), address, info, patch_code);
1738 }
1740 if (is_oop) {
1741 // Store to an object, so mark the card covering the object's header
1742 post_barrier(object.result(), value.result());
1743 }
1745 if (is_volatile && os::is_MP()) {
1746 __ membar();
1747 }
1748 }
1751 void LIRGenerator::do_LoadField(LoadField* x) {
1752 bool needs_patching = x->needs_patching();
1753 bool is_volatile = x->field()->is_volatile();
1754 BasicType field_type = x->field_type();
1756 CodeEmitInfo* info = NULL;
1757 if (needs_patching) {
1758 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1759 info = state_for(x, x->state_before());
1760 } else if (x->needs_null_check()) {
1761 NullCheck* nc = x->explicit_null_check();
1762 if (nc == NULL) {
1763 info = state_for(x);
1764 } else {
1765 info = state_for(nc);
1766 }
1767 }
1769 LIRItem object(x->obj(), this);
1771 object.load_item();
1773 #ifndef PRODUCT
1774 if (PrintNotLoaded && needs_patching) {
1775 tty->print_cr(" ###class not loaded at load_%s bci %d",
1776 x->is_static() ? "static" : "field", x->printable_bci());
1777 }
1778 #endif
1780 bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1781 if (x->needs_null_check() &&
1782 (needs_patching ||
1783 MacroAssembler::needs_explicit_null_check(x->offset()) ||
1784 stress_deopt)) {
1785 LIR_Opr obj = object.result();
1786 if (stress_deopt) {
1787 obj = new_register(T_OBJECT);
1788 __ move(LIR_OprFact::oopConst(NULL), obj);
1789 }
1790 // Emit an explicit null check because the offset is too large.
1791 // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1792 // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1793 __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1794 }
1796 LIR_Opr reg = rlock_result(x, field_type);
1797 LIR_Address* address;
1798 if (needs_patching) {
1799 // we need to patch the offset in the instruction so don't allow
1800 // generate_address to try to be smart about emitting the -1.
1801 // Otherwise the patching code won't know how to find the
1802 // instruction to patch.
1803 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1804 } else {
1805 address = generate_address(object.result(), x->offset(), field_type);
1806 }
1808 if (is_volatile && !needs_patching) {
1809 volatile_field_load(address, reg, info);
1810 } else {
1811 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1812 __ load(address, reg, info, patch_code);
1813 }
1815 if (is_volatile && os::is_MP()) {
1816 __ membar_acquire();
1817 }
1818 }
1821 //------------------------java.nio.Buffer.checkIndex------------------------
1823 // int java.nio.Buffer.checkIndex(int)
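//
// The Java-level behavior being compiled is roughly (a sketch):
//
//   int checkIndex(int i) {
//     if (i < 0 || i >= limit) throw new IndexOutOfBoundsException();
//     return i;
//   }
//
// A single unsigned comparison against limit (aboveEqual/belowEqual below)
// covers both the negative and the too-large case at once.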
1824 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1825 // NOTE: by the time we are in checkIndex() we are guaranteed that
1826 // the buffer is non-null (because checkIndex is package-private and
1827 // only called from within other methods in the buffer).
1828 assert(x->number_of_arguments() == 2, "wrong type");
1829 LIRItem buf (x->argument_at(0), this);
1830 LIRItem index(x->argument_at(1), this);
1831 buf.load_item();
1832 index.load_item();
1834 LIR_Opr result = rlock_result(x);
1835 if (GenerateRangeChecks) {
1836 CodeEmitInfo* info = state_for(x);
1837 CodeStub* stub = new RangeCheckStub(info, index.result(), true);
1838 if (index.result()->is_constant()) {
1839 cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1840 __ branch(lir_cond_belowEqual, T_INT, stub);
1841 } else {
1842 cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
1843 java_nio_Buffer::limit_offset(), T_INT, info);
1844 __ branch(lir_cond_aboveEqual, T_INT, stub);
1845 }
1846 __ move(index.result(), result);
1847 } else {
1848 // Just load the index into the result register
1849 __ move(index.result(), result);
1850 }
1851 }
1854 //------------------------array access--------------------------------------
1857 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
1858 LIRItem array(x->array(), this);
1859 array.load_item();
1860 LIR_Opr reg = rlock_result(x);
1862 CodeEmitInfo* info = NULL;
1863 if (x->needs_null_check()) {
1864 NullCheck* nc = x->explicit_null_check();
1865 if (nc == NULL) {
1866 info = state_for(x);
1867 } else {
1868 info = state_for(nc);
1869 }
1870 if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
1871 LIR_Opr obj = new_register(T_OBJECT);
1872 __ move(LIR_OprFact::oopConst(NULL), obj);
1873 __ null_check(obj, new CodeEmitInfo(info));
1874 }
1875 }
1876 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
1877 }
1880 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
1881 bool use_length = x->length() != NULL;
1882 LIRItem array(x->array(), this);
1883 LIRItem index(x->index(), this);
1884 LIRItem length(this);
1885 bool needs_range_check = x->compute_needs_range_check();
1887 if (use_length && needs_range_check) {
1888 length.set_instruction(x->length());
1889 length.load_item();
1890 }
1892 array.load_item();
1893 if (index.is_constant() && can_inline_as_constant(x->index())) {
1894 // let it be a constant
1895 index.dont_load_item();
1896 } else {
1897 index.load_item();
1898 }
1900 CodeEmitInfo* range_check_info = state_for(x);
1901 CodeEmitInfo* null_check_info = NULL;
1902 if (x->needs_null_check()) {
1903 NullCheck* nc = x->explicit_null_check();
1904 if (nc != NULL) {
1905 null_check_info = state_for(nc);
1906 } else {
1907 null_check_info = range_check_info;
1908 }
1909 if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
1910 LIR_Opr obj = new_register(T_OBJECT);
1911 __ move(LIR_OprFact::oopConst(NULL), obj);
1912 __ null_check(obj, new CodeEmitInfo(null_check_info));
1913 }
1914 }
1916 // emit array address setup early so it schedules better
1917 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
1919 if (GenerateRangeChecks && needs_range_check) {
1920 if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1921 __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
1922 } else if (use_length) {
1923 // TODO: use a (modified) version of array_range_check that does not require a
1924 // constant length to be loaded to a register
1925 __ cmp(lir_cond_belowEqual, length.result(), index.result());
1926 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
1927 } else {
1928 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1929 // The range check performs the null check, so clear it out for the load
1930 null_check_info = NULL;
1931 }
1932 }
1934 __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
1935 }
1938 void LIRGenerator::do_NullCheck(NullCheck* x) {
1939 if (x->can_trap()) {
1940 LIRItem value(x->obj(), this);
1941 value.load_item();
1942 CodeEmitInfo* info = state_for(x);
1943 __ null_check(value.result(), info);
1944 }
1945 }
1948 void LIRGenerator::do_TypeCast(TypeCast* x) {
1949 LIRItem value(x->obj(), this);
1950 value.load_item();
1951 // the result is the same as from the node we are casting
1952 set_result(x, value.result());
1953 }
1956 void LIRGenerator::do_Throw(Throw* x) {
1957 LIRItem exception(x->exception(), this);
1958 exception.load_item();
1959 set_no_result(x);
1960 LIR_Opr exception_opr = exception.result();
1961 CodeEmitInfo* info = state_for(x, x->state());
1963 #ifndef PRODUCT
1964 if (PrintC1Statistics) {
1965 increment_counter(Runtime1::throw_count_address(), T_INT);
1966 }
1967 #endif
1969 // check if the instruction has an xhandler in any of the nested scopes
1970 bool unwind = false;
1971 if (info->exception_handlers()->length() == 0) {
1972 // this throw is not inside an xhandler
1973 unwind = true;
1974 } else {
1975 // get some idea of the throw type
1976 bool type_is_exact = true;
1977 ciType* throw_type = x->exception()->exact_type();
1978 if (throw_type == NULL) {
1979 type_is_exact = false;
1980 throw_type = x->exception()->declared_type();
1981 }
1982 if (throw_type != NULL && throw_type->is_instance_klass()) {
1983 ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
1984 unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
1985 }
1986 }
1988 // do null check before moving exception oop into fixed register
1989 // to avoid a fixed interval with an oop during the null check.
1990 // Use a copy of the CodeEmitInfo because debug information is
1991 // different for null_check and throw.
1992 if (GenerateCompilerNullChecks &&
1993 (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
1994 // if the exception object wasn't created using new then it might be null.
1995 __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
1996 }
1998 if (compilation()->env()->jvmti_can_post_on_exceptions()) {
1999 // we need to go through the exception lookup path to get JVMTI
2000 // notification done
2001 unwind = false;
2002 }
2004 // move exception oop into fixed register
2005 __ move(exception_opr, exceptionOopOpr());
2007 if (unwind) {
2008 __ unwind_exception(exceptionOopOpr());
2009 } else {
2010 __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
2011 }
2012 }
2015 void LIRGenerator::do_RoundFP(RoundFP* x) {
2016 LIRItem input(x->input(), this);
2017 input.load_item();
2018 LIR_Opr input_opr = input.result();
2019 assert(input_opr->is_register(), "why round if value is not in a register?");
2020 assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
2021 if (input_opr->is_single_fpu()) {
2022 set_result(x, round_item(input_opr)); // This code path not currently taken
2023 } else {
2024 LIR_Opr result = new_register(T_DOUBLE);
2025 set_vreg_flag(result, must_start_in_memory);
2026 __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
2027 set_result(x, result);
2028 }
2029 }
2031 // Here UnsafeGetRaw may have x->base() and x->index() be either int or long
2032 // on both 32-bit and 64-bit platforms. On 64-bit, x->base() is expected to always be long.
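// The effective address formed below is, conceptually:
//
//   addr = base + (index << log2_scale)
//
// where a constant index requires log2_scale == 0 and is folded into the
// address displacement instead of being shifted.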
2033 void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
2034 LIRItem base(x->base(), this);
2035 LIRItem idx(this);
2037 base.load_item();
2038 if (x->has_index()) {
2039 idx.set_instruction(x->index());
2040 idx.load_nonconstant();
2041 }
2043 LIR_Opr reg = rlock_result(x, x->basic_type());
2045 int log2_scale = 0;
2046 if (x->has_index()) {
2047 log2_scale = x->log2_scale();
2048 }
2050 assert(!x->has_index() || idx.value() == x->index(), "should match");
2052 LIR_Opr base_op = base.result();
2053 LIR_Opr index_op = idx.result();
2054 #ifndef _LP64
2055 if (base_op->type() == T_LONG) {
2056 base_op = new_register(T_INT);
2057 __ convert(Bytecodes::_l2i, base.result(), base_op);
2058 }
2059 if (x->has_index()) {
2060 if (index_op->type() == T_LONG) {
2061 LIR_Opr long_index_op = index_op;
2062 if (index_op->is_constant()) {
2063 long_index_op = new_register(T_LONG);
2064 __ move(index_op, long_index_op);
2065 }
2066 index_op = new_register(T_INT);
2067 __ convert(Bytecodes::_l2i, long_index_op, index_op);
2068 } else {
2069 assert(x->index()->type()->tag() == intTag, "must be");
2070 }
2071 }
2072 // At this point base and index should be all ints.
2073 assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
2074 assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
2075 #else
2076 if (x->has_index()) {
2077 if (index_op->type() == T_INT) {
2078 if (!index_op->is_constant()) {
2079 index_op = new_register(T_LONG);
2080 __ convert(Bytecodes::_i2l, idx.result(), index_op);
2081 }
2082 } else {
2083 assert(index_op->type() == T_LONG, "must be");
2084 if (index_op->is_constant()) {
2085 index_op = new_register(T_LONG);
2086 __ move(idx.result(), index_op);
2087 }
2088 }
2089 }
2090 // At this point base is a non-constant long.
2091 // Index is a long register or an int constant.
2092 // We allow the constant to stay an int because that would allow us a more compact encoding by
2093 // embedding an immediate offset in the address expression. If we have a long constant, we have to
2094 // move it into a register first.
2095 assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
2096 assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
2097 (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
2098 #endif
2100 BasicType dst_type = x->basic_type();
2102 LIR_Address* addr;
2103 if (index_op->is_constant()) {
2104 assert(log2_scale == 0, "must not have a scale");
2105 assert(index_op->type() == T_INT, "only int constants supported");
2106 addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2107 } else {
2108 #ifdef X86
2109 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2110 #elif defined(GENERATE_ADDRESS_IS_PREFERRED)
2111 addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2112 #else
2113 if (index_op->is_illegal() || log2_scale == 0) {
2114 addr = new LIR_Address(base_op, index_op, dst_type);
2115 } else {
2116 LIR_Opr tmp = new_pointer_register();
2117 __ shift_left(index_op, log2_scale, tmp);
2118 addr = new LIR_Address(base_op, tmp, dst_type);
2119 }
2120 #endif
2121 }
2123 if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2124 __ unaligned_move(addr, reg);
2125 } else {
2126 if (dst_type == T_OBJECT && x->is_wide()) {
2127 __ move_wide(addr, reg);
2128 } else {
2129 __ move(addr, reg);
2130 }
2131 }
2132 }
2135 void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
2136 int log2_scale = 0;
2137 BasicType type = x->basic_type();
2139 if (x->has_index()) {
2140 log2_scale = x->log2_scale();
2141 }
2143 LIRItem base(x->base(), this);
2144 LIRItem value(x->value(), this);
2145 LIRItem idx(this);
2147 base.load_item();
2148 if (x->has_index()) {
2149 idx.set_instruction(x->index());
2150 idx.load_item();
2151 }
2153 if (type == T_BYTE || type == T_BOOLEAN) {
2154 value.load_byte_item();
2155 } else {
2156 value.load_item();
2157 }
2159 set_no_result(x);
2161 LIR_Opr base_op = base.result();
2162 LIR_Opr index_op = idx.result();
2164 #ifdef GENERATE_ADDRESS_IS_PREFERRED
2165 LIR_Address* addr = generate_address(base_op, index_op, log2_scale, 0, x->basic_type());
2166 #else
2167 #ifndef _LP64
2168 if (base_op->type() == T_LONG) {
2169 base_op = new_register(T_INT);
2170 __ convert(Bytecodes::_l2i, base.result(), base_op);
2171 }
2172 if (x->has_index()) {
2173 if (index_op->type() == T_LONG) {
2174 index_op = new_register(T_INT);
2175 __ convert(Bytecodes::_l2i, idx.result(), index_op);
2176 }
2177 }
2178 // At this point base and index should be all ints and not constants
2179 assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
2180 assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be a non-constant int");
2181 #else
2182 if (x->has_index()) {
2183 if (index_op->type() == T_INT) {
2184 index_op = new_register(T_LONG);
2185 __ convert(Bytecodes::_i2l, idx.result(), index_op);
2186 }
2187 }
2188 // At this point base and index are long and non-constant
2189 assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
2190 assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
2191 #endif
2193 if (log2_scale != 0) {
2194 // temporary fix (platform dependent code without shift on Intel would be better)
2195 // TODO: ARM also allows embedded shift in the address
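// (Background note: on TwoOperandLIRForm machines such as x86 a LIR
// operation's destination must equal its first input, which is why the
// index is first copied into tmp before the shift below.)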
2196 LIR_Opr tmp = new_pointer_register();
2197 if (TwoOperandLIRForm) {
2198 __ move(index_op, tmp);
2199 index_op = tmp;
2200 }
2201 __ shift_left(index_op, log2_scale, tmp);
2202 if (!TwoOperandLIRForm) {
2203 index_op = tmp;
2204 }
2205 }
2207 LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2208 #endif // !GENERATE_ADDRESS_IS_PREFERRED
2209 __ move(value.result(), addr);
2210 }
2213 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2214 BasicType type = x->basic_type();
2215 LIRItem src(x->object(), this);
2216 LIRItem off(x->offset(), this);
2218 off.load_item();
2219 src.load_item();
2221 LIR_Opr value = rlock_result(x, x->basic_type());
2223 get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2225 #if INCLUDE_ALL_GCS
2226 // We might be reading the value of the referent field of a
2227 // Reference object in order to attach it back to the live
2228 // object graph. If G1 is enabled then we need to record
2229 // the value that is being returned in an SATB log buffer.
2230 //
2231 // We need to generate code similar to the following...
2232 //
2233 // if (offset == java_lang_ref_Reference::referent_offset) {
2234 // if (src != NULL) {
2235 // if (klass(src)->reference_type() != REF_NONE) {
2236 // pre_barrier(..., value, ...);
2237 // }
2238 // }
2239 // }
2241 if (UseG1GC && type == T_OBJECT) {
2242 bool gen_pre_barrier = true; // Assume we need to generate pre_barrier.
2243 bool gen_offset_check = true; // Assume we need to generate the offset guard.
2244 bool gen_source_check = true; // Assume we need to check the src object for null.
2245 bool gen_type_check = true; // Assume we need to check the reference_type.
2247 if (off.is_constant()) {
2248 jlong off_con = (off.type()->is_int() ?
2249 (jlong) off.get_jint_constant() :
2250 off.get_jlong_constant());
2253 if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2254 // The constant offset is something other than referent_offset.
2255 // We can skip generating/checking the remaining guards and
2256 // skip generation of the code stub.
2257 gen_pre_barrier = false;
2258 } else {
2259 // The constant offset is the same as referent_offset -
2260 // we do not need to generate a runtime offset check.
2261 gen_offset_check = false;
2262 }
2263 }
2265 // We don't need to generate the stub if the source object is an array
2266 if (gen_pre_barrier && src.type()->is_array()) {
2267 gen_pre_barrier = false;
2268 }
2270 if (gen_pre_barrier) {
2271 // We still need to continue with the checks.
2272 if (src.is_constant()) {
2273 ciObject* src_con = src.get_jobject_constant();
2274 guarantee(src_con != NULL, "no source constant");
2276 if (src_con->is_null_object()) {
2277 // The constant src object is null - We can skip
2278 // generating the code stub.
2279 gen_pre_barrier = false;
2280 } else {
2281 // Non-null constant source object. We still have to generate
2282 // the slow stub - but we don't need to generate the runtime
2283 // null object check.
2284 gen_source_check = false;
2285 }
2286 }
2287 }
2288 if (gen_pre_barrier && !PatchALot) {
2289 // Can the klass of object be statically determined to be
2290 // a sub-class of Reference?
2291 ciType* type = src.value()->declared_type();
2292 if ((type != NULL) && type->is_loaded()) {
2293 if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
2294 gen_type_check = false;
2295 } else if (type->is_klass() &&
2296 !compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
2297 // Not Reference and not Object klass.
2298 gen_pre_barrier = false;
2299 }
2300 }
2301 }
2303 if (gen_pre_barrier) {
2304 LabelObj* Lcont = new LabelObj();
2306 // We may need to generate one or more runtime checks here. Let's start
2307 // with the offset check.
2308 if (gen_offset_check) {
2309 // if (offset != referent_offset) -> continue
2310 // If offset is an int then we can do the comparison with the
2311 // referent_offset constant; otherwise we need to move
2312 // referent_offset into a temporary register and generate
2313 // a reg-reg compare.
2315 LIR_Opr referent_off;
2317 if (off.type()->is_int()) {
2318 referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
2319 } else {
2320 assert(off.type()->is_long(), "what else?");
2321 referent_off = new_register(T_LONG);
2322 __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
2323 }
2324 __ cmp(lir_cond_notEqual, off.result(), referent_off);
2325 __ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
2326 }
2327 if (gen_source_check) {
2328 // offset is a const and equals referent offset
2329 // if (source == null) -> continue
2330 __ cmp(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL));
2331 __ branch(lir_cond_equal, T_OBJECT, Lcont->label());
2332 }
2333 LIR_Opr src_klass = new_register(T_OBJECT);
2334 if (gen_type_check) {
2335 // We have determined that offset == referent_offset && src != null.
2336 // if (src->_klass->_reference_type == REF_NONE) -> continue
2337 __ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
2338 LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
2339 LIR_Opr reference_type = new_register(T_INT);
2340 __ move(reference_type_addr, reference_type);
2341 __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
2342 __ branch(lir_cond_equal, T_INT, Lcont->label());
2343 }
2344 {
2345 // We have determined that src->_klass->_reference_type != REF_NONE
2346 // so register the value in the referent field with the pre-barrier.
2347 pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
2348 value /* pre_val */,
2349 false /* do_load */,
2350 false /* patch */,
2351 NULL /* info */);
2352 }
2353 __ branch_destination(Lcont->label());
2354 }
2355 }
2356 #endif // INCLUDE_ALL_GCS
2358 if (x->is_volatile() && os::is_MP()) __ membar_acquire();
2359 }
2362 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2363 BasicType type = x->basic_type();
2364 LIRItem src(x->object(), this);
2365 LIRItem off(x->offset(), this);
2366 LIRItem data(x->value(), this);
2368 src.load_item();
2369 if (type == T_BOOLEAN || type == T_BYTE) {
2370 data.load_byte_item();
2371 } else {
2372 data.load_item();
2373 }
2374 off.load_item();
2376 set_no_result(x);
2378 if (x->is_volatile() && os::is_MP()) __ membar_release();
2379 put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
2380 if (x->is_volatile() && os::is_MP()) __ membar();
2381 }
2384 void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
2385 LIRItem src(x->object(), this);
2386 LIRItem off(x->offset(), this);
2388 src.load_item();
2389 if (off.is_constant() && can_inline_as_constant(x->offset())) {
2390 // let it be a constant
2391 off.dont_load_item();
2392 } else {
2393 off.load_item();
2394 }
2396 set_no_result(x);
2398 LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
2399 __ prefetch(addr, is_store);
2400 }
2403 void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
2404 do_UnsafePrefetch(x, false);
2405 }
2408 void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
2409 do_UnsafePrefetch(x, true);
2410 }
2413 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2414 int lng = x->length();
2416 for (int i = 0; i < lng; i++) {
2417 SwitchRange* one_range = x->at(i);
2418 int low_key = one_range->low_key();
2419 int high_key = one_range->high_key();
2420 BlockBegin* dest = one_range->sux();
2421 if (low_key == high_key) {
2422 __ cmp(lir_cond_equal, value, low_key);
2423 __ branch(lir_cond_equal, T_INT, dest);
2424 } else if (high_key - low_key == 1) {
2425 __ cmp(lir_cond_equal, value, low_key);
2426 __ branch(lir_cond_equal, T_INT, dest);
2427 __ cmp(lir_cond_equal, value, high_key);
2428 __ branch(lir_cond_equal, T_INT, dest);
2429 } else {
2430 LabelObj* L = new LabelObj();
2431 __ cmp(lir_cond_less, value, low_key);
2432 __ branch(lir_cond_less, T_INT, L->label());
2433 __ cmp(lir_cond_lessEqual, value, high_key);
2434 __ branch(lir_cond_lessEqual, T_INT, dest);
2435 __ branch_destination(L->label());
2436 }
2437 }
2438 __ jump(default_sux);
2439 }
2442 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2443 SwitchRangeList* res = new SwitchRangeList();
2444 int len = x->length();
2445 if (len > 0) {
2446 BlockBegin* sux = x->sux_at(0);
2447 int key = x->lo_key();
2448 BlockBegin* default_sux = x->default_sux();
2449 SwitchRange* range = new SwitchRange(key, sux);
2450 for (int i = 0; i < len; i++, key++) {
2451 BlockBegin* new_sux = x->sux_at(i);
2452 if (sux == new_sux) {
2453 // still in same range
2454 range->set_high_key(key);
2455 } else {
2456 // skip tests which explicitly dispatch to the default
2457 if (sux != default_sux) {
2458 res->append(range);
2459 }
2460 range = new SwitchRange(key, new_sux);
2461 }
2462 sux = new_sux;
2463 }
2464 if (res->length() == 0 || res->last() != range) res->append(range);
2465 }
2466 return res;
2467 }
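// For example (illustrative): a tableswitch with lo_key == 0 and successors
// [B1, B1, B2, B3, B3] collapses into the ranges
//   [0,1] -> B1, [2,2] -> B2, [3,4] -> B3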
2470 // we expect the keys to be sorted by increasing value
2471 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2472 SwitchRangeList* res = new SwitchRangeList();
2473 int len = x->length();
2474 if (len > 0) {
2475 BlockBegin* default_sux = x->default_sux();
2476 int key = x->key_at(0);
2477 BlockBegin* sux = x->sux_at(0);
2478 SwitchRange* range = new SwitchRange(key, sux);
2479 for (int i = 1; i < len; i++) {
2480 int new_key = x->key_at(i);
2481 BlockBegin* new_sux = x->sux_at(i);
2482 if (key+1 == new_key && sux == new_sux) {
2483 // still in same range
2484 range->set_high_key(new_key);
2485 } else {
2486 // skip tests which explicitly dispatch to the default
2487 if (range->sux() != default_sux) {
2488 res->append(range);
2489 }
2490 range = new SwitchRange(new_key, new_sux);
2491 }
2492 key = new_key;
2493 sux = new_sux;
2494 }
2495 if (res->length() == 0 || res->last() != range) res->append(range);
2496 }
2497 return res;
2498 }
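// For example (illustrative): a lookupswitch with sorted keys {1, 2, 3, 7}
// where keys 1..3 share a successor yields the ranges [1,3] and [7,7].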
2501 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2502 LIRItem tag(x->tag(), this);
2503 tag.load_item();
2504 set_no_result(x);
2506 if (x->is_safepoint()) {
2507 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2508 }
2510 // move values into phi locations
2511 move_to_phi(x->state());
2513 int lo_key = x->lo_key();
2514 int hi_key = x->hi_key();
2515 int len = x->length();
2516 LIR_Opr value = tag.result();
2517 if (UseTableRanges) {
2518 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2519 } else {
2520 for (int i = 0; i < len; i++) {
2521 __ cmp(lir_cond_equal, value, i + lo_key);
2522 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2523 }
2524 __ jump(x->default_sux());
2525 }
2526 }
2529 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2530 LIRItem tag(x->tag(), this);
2531 tag.load_item();
2532 set_no_result(x);
2534 if (x->is_safepoint()) {
2535 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2536 }
2538 // move values into phi locations
2539 move_to_phi(x->state());
2541 LIR_Opr value = tag.result();
2542 if (UseTableRanges) {
2543 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2544 } else {
2545 int len = x->length();
2546 for (int i = 0; i < len; i++) {
2547 __ cmp(lir_cond_equal, value, x->key_at(i));
2548 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2549 }
2550 __ jump(x->default_sux());
2551 }
2552 }
2555 void LIRGenerator::do_Goto(Goto* x) {
2556 set_no_result(x);
2558 if (block()->next()->as_OsrEntry()) {
2559 // need to free up storage used for OSR entry point
2560 LIR_Opr osrBuffer = block()->next()->operand();
2561 BasicTypeList signature;
2562 signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
2563 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2564 __ move(osrBuffer, cc->args()->at(0));
2565 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2566 getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2567 }
2569 if (x->is_safepoint()) {
2570 ValueStack* state = x->state_before() ? x->state_before() : x->state();
2572 // increment backedge counter if needed
2573 CodeEmitInfo* info = state_for(x, state);
2574 increment_backedge_counter(info, x->profiled_bci());
2575 CodeEmitInfo* safepoint_info = state_for(x, state);
2576 __ safepoint(safepoint_poll_register(), safepoint_info);
2577 }
2579 // Gotos can be folded Ifs; handle this case.
2580 if (x->should_profile()) {
2581 ciMethod* method = x->profiled_method();
2582 assert(method != NULL, "method should be set if branch is profiled");
2583 ciMethodData* md = method->method_data_or_null();
2584 assert(md != NULL, "Sanity");
2585 ciProfileData* data = md->bci_to_data(x->profiled_bci());
2586 assert(data != NULL, "must have profiling data");
2587 int offset;
2588 if (x->direction() == Goto::taken) {
2589 assert(data->is_BranchData(), "need BranchData for two-way branches");
2590 offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2591 } else if (x->direction() == Goto::not_taken) {
2592 assert(data->is_BranchData(), "need BranchData for two-way branches");
2593 offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2594 } else {
2595 assert(data->is_JumpData(), "need JumpData for branches");
2596 offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2597 }
2598 LIR_Opr md_reg = new_register(T_METADATA);
2599 __ metadata2reg(md->constant_encoding(), md_reg);
2601 increment_counter(new LIR_Address(md_reg, offset,
2602 NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2603 }
2605 // emit phi-instruction moves after the safepoint since this simplifies
2606 // describing the state at the safepoint.
2607 move_to_phi(x->state());
2609 __ jump(x->default_sux());
2610 }
2612 /**
2613 * Emit profiling code if needed for arguments, parameters, return value types
2614 *
2615 * @param md MDO the code will update at runtime
2616 * @param md_base_offset common offset in the MDO for this profile and subsequent ones
2617 * @param md_offset offset in the MDO (on top of md_base_offset) for this profile
2618 * @param profiled_k current profile
2619 * @param obj IR node for the object to be profiled
2620 * @param mdp register to hold the pointer inside the MDO (md + md_base_offset).
2621 * Set once we find an update to make and use for next ones.
2622 * @param not_null true if we know obj cannot be null
2623 * @param signature_at_call_k signature at call for obj
2624 * @param callee_signature_k signature of callee for obj (the at-call and
2625 * callee signatures differ at method handle calls)
2626 * @return the only klass we know will ever be seen at this profile point
2627 */
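// (For reference, a sketch of the encoding assumed here: profiled_k packs a
// klass pointer together with low status bits queried via
// TypeEntries::was_null_seen() and TypeEntries::is_type_unknown();
// ciTypeEntries::valid_ciklass() strips the status bits to recover the klass.)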
2628 ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
2629 Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
2630 ciKlass* callee_signature_k) {
2631 ciKlass* result = NULL;
2632 bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
2633 bool do_update = !TypeEntries::is_type_unknown(profiled_k);
2634 // known not to be null or null bit already set and already set to
2635 // unknown: nothing we can do to improve profiling
2636 if (!do_null && !do_update) {
2637 return result;
2638 }
2640 ciKlass* exact_klass = NULL;
2641 Compilation* comp = Compilation::current();
2642 if (do_update) {
2643 // try to find exact type, using CHA if possible, so that loading
2644 // the klass from the object can be avoided
2645 ciType* type = obj->exact_type();
2646 if (type == NULL) {
2647 type = obj->declared_type();
2648 type = comp->cha_exact_type(type);
2649 }
2650 assert(type == NULL || type->is_klass(), "type should be class");
2651 exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
2653 do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2654 }
2656 if (!do_null && !do_update) {
2657 return result;
2658 }
2660 ciKlass* exact_signature_k = NULL;
2661 if (do_update) {
2662 // Is the type from the signature exact (the only one possible)?
2663 exact_signature_k = signature_at_call_k->exact_klass();
2664 if (exact_signature_k == NULL) {
2665 exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2666 } else {
2667 result = exact_signature_k;
2668 // Known statically. No need to emit any code: prevent
2669 // LIR_Assembler::emit_profile_type() from emitting useless code
2670 profiled_k = ciTypeEntries::with_status(result, profiled_k);
2671 }
2672 // exact_klass and exact_signature_k can both be non-NULL but
2673 // different if exact_klass is loaded after the ciObject for
2674 // exact_signature_k is created.
2675 if (exact_klass == NULL && exact_signature_k != NULL && exact_klass != exact_signature_k) {
2676 // sometimes the type of the signature is better than the best type
2677 // the compiler has
2678 exact_klass = exact_signature_k;
2679 }
2680 if (callee_signature_k != NULL &&
2681 callee_signature_k != signature_at_call_k) {
2682 ciKlass* improved_klass = callee_signature_k->exact_klass();
2683 if (improved_klass == NULL) {
2684 improved_klass = comp->cha_exact_type(callee_signature_k);
2685 }
2686 if (exact_klass == NULL && improved_klass != NULL && exact_klass != improved_klass) {
2687 exact_klass = improved_klass;
2688 }
2689 }
2690 do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2691 }
2693 if (!do_null && !do_update) {
2694 return result;
2695 }
2697 if (mdp == LIR_OprFact::illegalOpr) {
2698 mdp = new_register(T_METADATA);
2699 __ metadata2reg(md->constant_encoding(), mdp);
2700 if (md_base_offset != 0) {
2701 LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
2702 mdp = new_pointer_register();
2703 __ leal(LIR_OprFact::address(base_type_address), mdp);
2704 }
2705 }
2706 LIRItem value(obj, this);
2707 value.load_item();
2708 __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
2709 value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
2710 return result;
2711 }
2713 // profile parameters on entry to the root of the compilation
2714 void LIRGenerator::profile_parameters(Base* x) {
2715 if (compilation()->profile_parameters()) {
2716 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2717 ciMethodData* md = scope()->method()->method_data_or_null();
2718 assert(md != NULL, "Sanity");
2720 if (md->parameters_type_data() != NULL) {
2721 ciParametersTypeData* parameters_type_data = md->parameters_type_data();
2722 ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
2723 LIR_Opr mdp = LIR_OprFact::illegalOpr;
2724 for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
2725 LIR_Opr src = args->at(i);
2726 assert(!src->is_illegal(), "check");
2727 BasicType t = src->type();
2728 if (t == T_OBJECT || t == T_ARRAY) {
2729 intptr_t profiled_k = parameters->type(j);
2730 Local* local = x->state()->local_at(java_index)->as_Local();
2731 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2732 in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2733 profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL);
2734 // If the profile is known statically, set it once and for all and do not emit any code
2735 if (exact != NULL) {
2736 md->set_parameter_type(j, exact);
2737 }
2738 j++;
2739 }
2740 java_index += type2size[t];
2741 }
2742 }
2743 }
2744 }
2746 void LIRGenerator::do_Base(Base* x) {
2747 __ std_entry(LIR_OprFact::illegalOpr);
2748 // Emit moves from physical registers / stack slots to virtual registers
2749 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2750 IRScope* irScope = compilation()->hir()->top_scope();
2751 int java_index = 0;
2752 for (int i = 0; i < args->length(); i++) {
2753 LIR_Opr src = args->at(i);
2754 assert(!src->is_illegal(), "check");
2755 BasicType t = src->type();
2757 // Types which are smaller than int are passed as int, so
2758 // correct the type that is passed.
2759 switch (t) {
2760 case T_BYTE:
2761 case T_BOOLEAN:
2762 case T_SHORT:
2763 case T_CHAR:
2764 t = T_INT;
2765 break;
2766 }
2768 LIR_Opr dest = new_register(t);
2769 __ move(src, dest);
2771 // Assign new location to Local instruction for this local
2772 Local* local = x->state()->local_at(java_index)->as_Local();
2773 assert(local != NULL, "Locals for incoming arguments must have been created");
2774 #ifndef __SOFTFP__
2775 // The java calling convention passes double as long and float as int.
2776 assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
2777 #endif // __SOFTFP__
2778 local->set_operand(dest);
2779 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
2780 java_index += type2size[t];
2781 }
2783 if (compilation()->env()->dtrace_method_probes()) {
2784 BasicTypeList signature;
2785 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
2786 signature.append(T_METADATA); // Method*
2787 LIR_OprList* args = new LIR_OprList();
2788 args->append(getThreadPointer());
2789 LIR_Opr meth = new_register(T_METADATA);
2790 __ metadata2reg(method()->constant_encoding(), meth);
2791 args->append(meth);
2792 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
2793 }
2795 if (method()->is_synchronized()) {
2796 LIR_Opr obj;
2797 if (method()->is_static()) {
2798 obj = new_register(T_OBJECT);
2799 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
2800 } else {
2801 Local* receiver = x->state()->local_at(0)->as_Local();
2802 assert(receiver != NULL, "must already exist");
2803 obj = receiver->operand();
2804 }
2805 assert(obj->is_valid(), "must be valid");
2807 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2808 LIR_Opr lock = new_register(T_INT);
2809 __ load_stack_address_monitor(0, lock);
2811 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
2812 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
2814 // receiver is guaranteed non-NULL so don't need CodeEmitInfo
2815 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
2816 }
2817 }
2819 // increment invocation counters if needed
2820 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
2821 profile_parameters(x);
2822 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
2823 increment_invocation_counter(info);
2824 }
2826 // all blocks with a successor must end with an unconditional jump
2827 // to the successor even if they are consecutive
2828 __ jump(x->default_sux());
2829 }
2832 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2833 // construct our frame and model the production of incoming pointer
2834 // to the OSR buffer.
2835 __ osr_entry(LIR_Assembler::osrBufferPointer());
2836 LIR_Opr result = rlock_result(x);
2837 __ move(LIR_Assembler::osrBufferPointer(), result);
2838 }
2841 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2842 assert(args->length() == arg_list->length(),
2843 err_msg_res("args=%d, arg_list=%d", args->length(), arg_list->length()));
2844 for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2845 LIRItem* param = args->at(i);
2846 LIR_Opr loc = arg_list->at(i);
2847 if (loc->is_register()) {
2848 param->load_item_force(loc);
2849 } else {
2850 LIR_Address* addr = loc->as_address_ptr();
2851 param->load_for_store(addr->type());
2852 if (addr->type() == T_OBJECT) {
2853 __ move_wide(param->result(), addr);
2855 } else if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2856 __ unaligned_move(param->result(), addr);
2857 } else {
2858 __ move(param->result(), addr);
2859 }
2860 }
2861 }
2863 if (x->has_receiver()) {
2864 LIRItem* receiver = args->at(0);
2865 LIR_Opr loc = arg_list->at(0);
2866 if (loc->is_register()) {
2867 receiver->load_item_force(loc);
2868 } else {
2869 assert(loc->is_address(), "just checking");
2870 receiver->load_for_store(T_OBJECT);
2871 __ move_wide(receiver->result(), loc->as_address_ptr());
2872 }
2873 }
2874 }
2877 // Visits all arguments, returns appropriate items without loading them
2878 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2879 LIRItemList* argument_items = new LIRItemList();
2880 if (x->has_receiver()) {
2881 LIRItem* receiver = new LIRItem(x->receiver(), this);
2882 argument_items->append(receiver);
2883 }
2884 for (int i = 0; i < x->number_of_arguments(); i++) {
2885 LIRItem* param = new LIRItem(x->argument_at(i), this);
2886 argument_items->append(param);
2887 }
2888 return argument_items;
2889 }
2892 // The invoke with receiver has following phases:
2893 // a) traverse and load/lock receiver;
2894 // b) traverse all arguments -> item-array (invoke_visit_argument)
2895 // c) push receiver on stack
2896 // d) load each of the items and push on stack
2897 // e) unlock receiver
2898 // f) move receiver into receiver-register %o0
2899 // g) lock result registers and emit call operation
2900 //
2901 // Before issuing a call, we must spill-save all values on the stack
2902 // that are in caller-save registers. "spill-save" moves those registers
2903 // either into a free callee-save register or spills them if no free
2904 // callee-save register is available.
2905 //
2906 // The problem is where to invoke spill-save.
2907 // - if invoked between e) and f), we may lock callee save
2908 // register in "spill-save" that destroys the receiver register
2909 // before f) is executed
2910 // - if we rearrange f) to be earlier (by loading %o0) it
2911 // may destroy a value on the stack that is currently in %o0
2912 // and is waiting to be spilled
2913 // - if we keep the receiver locked while doing spill-save,
2914 // we cannot spill it as it is spill-locked
2915 //
2916 void LIRGenerator::do_Invoke(Invoke* x) {
2917 CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2919 LIR_OprList* arg_list = cc->args();
2920 LIRItemList* args = invoke_visit_arguments(x);
2921 LIR_Opr receiver = LIR_OprFact::illegalOpr;
2923 // setup result register
2924 LIR_Opr result_register = LIR_OprFact::illegalOpr;
2925 if (x->type() != voidType) {
2926 result_register = result_register_for(x->type());
2927 }
2929 CodeEmitInfo* info = state_for(x, x->state());
2931 invoke_load_arguments(x, args, arg_list);
2933 if (x->has_receiver()) {
2934 args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2935 receiver = args->at(0)->result();
2936 }
2938 // emit invoke code
2939 bool optimized = x->target_is_loaded() && x->target_is_final();
2940 assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2942 // JSR 292
2943 // Preserve the SP over MethodHandle call sites, if needed.
2944 ciMethod* target = x->target();
2945 bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
2946 target->is_method_handle_intrinsic() ||
2947 target->is_compiled_lambda_form());
2948 if (is_method_handle_invoke) {
2949 info->set_is_method_handle_invoke(true);
2950 if (FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
2951 __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
2952 }
2953 }
2955 switch (x->code()) {
2956 case Bytecodes::_invokestatic:
2957 __ call_static(target, result_register,
2958 SharedRuntime::get_resolve_static_call_stub(),
2959 arg_list, info);
2960 break;
2961 case Bytecodes::_invokespecial:
2962 case Bytecodes::_invokevirtual:
2963 case Bytecodes::_invokeinterface:
2964 // for a final target we still produce an inline cache, in order
2965 // to be able to call in mixed mode
2966 if (x->code() == Bytecodes::_invokespecial || optimized) {
2967 __ call_opt_virtual(target, receiver, result_register,
2968 SharedRuntime::get_resolve_opt_virtual_call_stub(),
2969 arg_list, info);
2970 } else if (x->vtable_index() < 0) {
2971 __ call_icvirtual(target, receiver, result_register,
2972 SharedRuntime::get_resolve_virtual_call_stub(),
2973 arg_list, info);
2974 } else {
2975 int entry_offset = InstanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2976 int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2977 __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2978 }
2979 break;
2980 case Bytecodes::_invokedynamic: {
2981 __ call_dynamic(target, receiver, result_register,
2982 SharedRuntime::get_resolve_static_call_stub(),
2983 arg_list, info);
2984 break;
2985 }
2986 default:
2987 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
2988 break;
2989 }
2991 // JSR 292
2992 // Restore the SP after MethodHandle call sites, if needed.
2993 if (is_method_handle_invoke
2994 && FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
2995 __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
2996 }
2998 if (x->type()->is_float() || x->type()->is_double()) {
2999 // Force rounding of results from non-strictfp when in strictfp
3000 // scope (or when we don't know the strictness of the callee, to
3001 // be safe.)
3002 if (method()->is_strict()) {
3003 if (!x->target_is_loaded() || !x->target_is_strictfp()) {
3004 result_register = round_item(result_register);
3005 }
3006 }
3007 }
3009 if (result_register->is_valid()) {
3010 LIR_Opr result = rlock_result(x);
3011 __ move(result_register, result);
3012 }
3013 }
3016 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
3017 assert(x->number_of_arguments() == 1, "wrong type");
3018 LIRItem value (x->argument_at(0), this);
3019 LIR_Opr reg = rlock_result(x);
3020 value.load_item();
3021 LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
3022 __ move(tmp, reg);
3023 }
3027 // Code for : x->x() {x->cond()} x->y() ? x->tval() : x->fval()
3028 void LIRGenerator::do_IfOp(IfOp* x) {
3029 #ifdef ASSERT
3030 {
3031 ValueTag xtag = x->x()->type()->tag();
3032 ValueTag ttag = x->tval()->type()->tag();
3033 assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3034 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3035 assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3036 }
3037 #endif
3039 LIRItem left(x->x(), this);
3040 LIRItem right(x->y(), this);
3041 left.load_item();
3042 if (can_inline_as_constant(right.value())) {
3043 right.dont_load_item();
3044 } else {
3045 right.load_item();
3046 }
3048 LIRItem t_val(x->tval(), this);
3049 LIRItem f_val(x->fval(), this);
3050 t_val.dont_load_item();
3051 f_val.dont_load_item();
3052 LIR_Opr reg = rlock_result(x);
3054 __ cmp(lir_cond(x->cond()), left.result(), right.result());
3055 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3056 }
3058 void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
3059 assert(x->number_of_arguments() == expected_arguments, "wrong type");
3060 LIR_Opr reg = result_register_for(x->type());
3061 __ call_runtime_leaf(routine, getThreadTemp(),
3062 reg, new LIR_OprList());
3063 LIR_Opr result = rlock_result(x);
3064 __ move(reg, result);
3065 }
3067 #ifdef TRACE_HAVE_INTRINSICS
3068 void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
3069 LIR_Opr thread = getThreadPointer();
3070 LIR_Opr osthread = new_pointer_register();
3071 __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
3072 size_t thread_id_size = OSThread::thread_id_size();
3073 if (thread_id_size == (size_t) BytesPerLong) {
3074 LIR_Opr id = new_register(T_LONG);
3075 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id);
3076 __ convert(Bytecodes::_l2i, id, rlock_result(x));
3077 } else if (thread_id_size == (size_t) BytesPerInt) {
3078 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x));
3079 } else {
3080 ShouldNotReachHere();
3081 }
3082 }
3084 void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
3085 CodeEmitInfo* info = state_for(x);
3086 CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
3087 BasicType klass_pointer_type = NOT_LP64(T_INT) LP64_ONLY(T_LONG);
3088 assert(info != NULL, "must have info");
3089 LIRItem arg(x->argument_at(1), this);
3090 arg.load_item();
3091 LIR_Opr klass = new_pointer_register();
3092 __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), klass_pointer_type), klass, info);
3093 LIR_Opr id = new_register(T_LONG);
3094 ByteSize offset = TRACE_ID_OFFSET;
3095 LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
3096 __ move(trace_id_addr, id);
3097 __ logical_or(id, LIR_OprFact::longConst(0x01l), id);
3098 __ store(id, trace_id_addr);
3099 __ logical_and(id, LIR_OprFact::longConst(~0x3l), id);
3100 __ move(id, rlock_result(x));
3101 }
3102 #endif
3104 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
3105 switch (x->id()) {
3106 case vmIntrinsics::_intBitsToFloat :
3107 case vmIntrinsics::_doubleToRawLongBits :
3108 case vmIntrinsics::_longBitsToDouble :
3109 case vmIntrinsics::_floatToRawIntBits : {
3110 do_FPIntrinsics(x);
3111 break;
3112 }
3114 #ifdef TRACE_HAVE_INTRINSICS
3115 case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break;
3116 case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break;
3117 case vmIntrinsics::_counterTime:
3118 do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), 0, x);
3119 break;
3120 #endif
3122 case vmIntrinsics::_currentTimeMillis:
3123 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), 0, x);
3124 break;
3126 case vmIntrinsics::_nanoTime:
3127 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), 0, x);
3128 break;
3130 case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break;
3131 case vmIntrinsics::_isInstance: do_isInstance(x); break;
3132 case vmIntrinsics::_getClass: do_getClass(x); break;
3133 case vmIntrinsics::_currentThread: do_currentThread(x); break;
3135 case vmIntrinsics::_dlog: // fall through
3136 case vmIntrinsics::_dlog10: // fall through
3137 case vmIntrinsics::_dabs: // fall through
3138 case vmIntrinsics::_dsqrt: // fall through
3139 case vmIntrinsics::_dtan: // fall through
3140 case vmIntrinsics::_dsin : // fall through
3141 case vmIntrinsics::_dcos : // fall through
3142 case vmIntrinsics::_dexp : // fall through
3143 case vmIntrinsics::_dpow : do_MathIntrinsic(x); break;
3144 case vmIntrinsics::_arraycopy: do_ArrayCopy(x); break;
3146 // java.nio.Buffer.checkIndex
3147 case vmIntrinsics::_checkIndex: do_NIOCheckIndex(x); break;
3149 case vmIntrinsics::_compareAndSwapObject:
3150 do_CompareAndSwap(x, objectType);
3151 break;
3152 case vmIntrinsics::_compareAndSwapInt:
3153 do_CompareAndSwap(x, intType);
3154 break;
3155 case vmIntrinsics::_compareAndSwapLong:
3156 do_CompareAndSwap(x, longType);
3157 break;
3159 case vmIntrinsics::_loadFence :
3160 if (os::is_MP()) __ membar_acquire();
3161 break;
3162 case vmIntrinsics::_storeFence:
3163 if (os::is_MP()) __ membar_release();
3164 break;
3165 case vmIntrinsics::_fullFence :
3166 if (os::is_MP()) __ membar();
3167 break;
3169 case vmIntrinsics::_Reference_get:
3170 do_Reference_get(x);
3171 break;
3173 case vmIntrinsics::_updateCRC32:
3174 case vmIntrinsics::_updateBytesCRC32:
3175 case vmIntrinsics::_updateByteBufferCRC32:
3176 do_update_CRC32(x);
3177 break;
3179 default: ShouldNotReachHere(); break;
3180 }
3181 }
3183 void LIRGenerator::profile_arguments(ProfileCall* x) {
3184 if (compilation()->profile_arguments()) {
3185 int bci = x->bci_of_invoke();
3186 ciMethodData* md = x->method()->method_data_or_null();
3187 ciProfileData* data = md->bci_to_data(bci);
3188 if (data != NULL) {
3189 if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
3190 (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
3191 ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
3192 int base_offset = md->byte_offset_of_slot(data, extra);
3193 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3194 ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
3196 Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3197 int start = 0;
3198 int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
3199 if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
3200 // first argument is not profiled at call (method handle invoke)
3201 assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
3202 start = 1;
3203 }
3204 ciSignature* callee_signature = x->callee()->signature();
3205 // method handle call to virtual method
3206 bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
3207 ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL);
3209 bool ignored_will_link;
3210 ciSignature* signature_at_call = NULL;
3211 x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3212 ciSignatureStream signature_at_call_stream(signature_at_call);
3214 // if called through method handle invoke, some arguments may have been popped
3215 for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
3216 int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
3217 ciKlass* exact = profile_type(md, base_offset, off,
3218 args->type(i), x->profiled_arg_at(i+start), mdp,
3219 !x->arg_needs_null_check(i+start),
3220 signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
3221 if (exact != NULL) {
3222 md->set_argument_type(bci, i, exact);
3223 }
3224 }
3225 } else {
3226 #ifdef ASSERT
3227 Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
3228 int n = x->nb_profiled_args();
3229 assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
3230 (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
3231 "only at JSR292 bytecodes");
3232 #endif
3233 }
3234 }
3235 }
3236 }
3238 // profile parameters on entry to an inlined method
3239 void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
3240 if (compilation()->profile_parameters() && x->inlined()) {
3241 ciMethodData* md = x->callee()->method_data_or_null();
3242 if (md != NULL) {
3243 ciParametersTypeData* parameters_type_data = md->parameters_type_data();
3244 if (parameters_type_data != NULL) {
3245 ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
3246 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3247 bool has_receiver = !x->callee()->is_static();
3248 ciSignature* sig = x->callee()->signature();
3249 ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
3250 int i = 0; // to iterate on the Instructions
3251 Value arg = x->recv();
3252 bool not_null = false;
3253 int bci = x->bci_of_invoke();
3254 Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3255 // The first parameter is the receiver, so that's what we start
3256 // with if it exists. One exception is a method handle call to a
3257 // virtual method: there the receiver is in the args list.
3258 if (arg == NULL || !Bytecodes::has_receiver(bc)) {
3259 i = 1;
3260 arg = x->profiled_arg_at(0);
3261 not_null = !x->arg_needs_null_check(0);
3262 }
3263 int k = 0; // to iterate on the profile data
3264 for (;;) {
3265 intptr_t profiled_k = parameters->type(k);
3266 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
3267 in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
3268 profiled_k, arg, mdp, not_null, sig_stream.next_klass(), NULL);
3269 // If the profile is known statically, set it once and for all and do not emit any code
3270 if (exact != NULL) {
3271 md->set_parameter_type(k, exact);
3272 }
3273 k++;
3274 if (k >= parameters_type_data->number_of_parameters()) {
3275 #ifdef ASSERT
3276 int extra = 0;
3277 if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
3278 x->nb_profiled_args() >= TypeProfileParmsLimit &&
3279 x->recv() != NULL && Bytecodes::has_receiver(bc)) {
3280 extra += 1;
3281 }
3282 assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
3283 #endif
3284 break;
3285 }
3286 arg = x->profiled_arg_at(i);
3287 not_null = !x->arg_needs_null_check(i);
3288 i++;
3289 }
3290 }
3291 }
3292 }
3293 }
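// Emit call-site profiling: record argument and parameter types when
// requested, then emit the profile_call LIR op that updates the call
// counters and receiver type profile in the MDO.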
3295 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
3296 // Need recv in a temporary register so it interferes with the other temporaries
3297 LIR_Opr recv = LIR_OprFact::illegalOpr;
3298 LIR_Opr mdo = new_register(T_OBJECT);
3299 // tmp is used to hold the counters on SPARC
3300 LIR_Opr tmp = new_pointer_register();
3302 if (x->nb_profiled_args() > 0) {
3303 profile_arguments(x);
3304 }
3306 // profile parameters on inlined method entry including receiver
3307 if (x->recv() != NULL || x->nb_profiled_args() > 0) {
3308 profile_parameters_at_call(x);
3309 }
3311 if (x->recv() != NULL) {
3312 LIRItem value(x->recv(), this);
3313 value.load_item();
3314 recv = new_register(T_OBJECT);
3315 __ move(value.result(), recv);
3316 }
3317 __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3318 }
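// Record the observed return type of a call in the MethodData, the
// return-type counterpart of profile_arguments() above.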
3320 void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3321 int bci = x->bci_of_invoke();
3322 ciMethodData* md = x->method()->method_data_or_null();
3323 ciProfileData* data = md->bci_to_data(bci);
3324 if (data != NULL) {
3325 assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3326 ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3327 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3329 bool ignored_will_link;
3330 ciSignature* signature_at_call = NULL;
3331 x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3333 // The offset within the MDO of the entry to update may be too large
3334 // to be used in load/store instructions on some platforms. So have
3335 // profile_type() compute the address of the profile in a register.
3336 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3337 ret->type(), x->ret(), mdp,
3338 !x->needs_null_check(),
3339 signature_at_call->return_type()->as_klass(),
3340 x->callee()->signature()->return_type()->as_klass());
3341 if (exact != NULL) {
3342 md->set_return_type(bci, exact);
3343 }
3344 }
3345 }
3347 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3348 // We can safely ignore accessors here, since C2 will inline them anyway;
3349 // accessors are also always mature.
3350 if (!x->inlinee()->is_accessor()) {
3351 CodeEmitInfo* info = state_for(x, x->state(), true);
3352 // Notify the runtime very infrequently, only to take care of counter overflows
3353 increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true);
3354 }
3355 }
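// Pick the notification frequency for invocation/backedge counter
// overflow according to the current compilation level (tier).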
3357 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
3358 int freq_log = 0;
3359 int level = compilation()->env()->comp_level();
3360 if (level == CompLevel_limited_profile) {
3361 freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3362 } else if (level == CompLevel_full_profile) {
3363 freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3364 } else {
3365 ShouldNotReachHere();
3366 }
3367 // Increment the appropriate invocation/backedge counter and notify the runtime.
3368 increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
3369 }
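// Increment the invocation or backedge counter, held in the
// MethodCounters (limited profile) or the MethodData (full profile).
// If notify is set, mask the new counter value with
// frequency << count_shift and branch to a CounterOverflowStub when the
// result is zero, which calls into the runtime roughly every
// frequency + 1 events.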
3371 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3372 ciMethod *method, int frequency,
3373 int bci, bool backedge, bool notify) {
3374 assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^n - 1 or 0");
3375 int level = _compilation->env()->comp_level();
3376 assert(level > CompLevel_simple, "Shouldn't be here");
3378 int offset = -1;
3379 LIR_Opr counter_holder = NULL;
3380 if (level == CompLevel_limited_profile) {
3381 MethodCounters* counters_adr = method->ensure_method_counters();
3382 if (counters_adr == NULL) {
3383 bailout("method counters allocation failed");
3384 return;
3385 }
3386 counter_holder = new_pointer_register();
3387 __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
3388 offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
3389 MethodCounters::invocation_counter_offset());
3390 } else if (level == CompLevel_full_profile) {
3391 counter_holder = new_register(T_METADATA);
3392 offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
3393 MethodData::invocation_counter_offset());
3394 ciMethodData* md = method->method_data_or_null();
3395 assert(md != NULL, "Sanity");
3396 __ metadata2reg(md->constant_encoding(), counter_holder);
3397 } else {
3398 ShouldNotReachHere();
3399 }
3400 LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3401 LIR_Opr result = new_register(T_INT);
3402 __ load(counter, result);
3403 __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
3404 __ store(result, counter);
3405 if (notify) {
3406 LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
3407 LIR_Opr meth = new_register(T_METADATA);
3408 __ metadata2reg(method->constant_encoding(), meth);
3409 __ logical_and(result, mask, result);
3410 __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3411 // The bci for info can point to the cmp; for ifs we want the bci of the if itself
3412 CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3413 __ branch(lir_cond_equal, T_INT, overflow);
3414 __ branch_destination(overflow->continuation());
3415 }
3416 }
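// Generate a call to a VM runtime entry: pass the current thread first
// if requested, then load and append each Java-visible argument.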
3418 void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3419 LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3420 BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3422 if (x->pass_thread()) {
3423 signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
3424 args->append(getThreadPointer());
3425 }
3427 for (int i = 0; i < x->number_of_arguments(); i++) {
3428 Value a = x->argument_at(i);
3429 LIRItem* item = new LIRItem(a, this);
3430 item->load_item();
3431 args->append(item->result());
3432 signature->append(as_BasicType(a->type()));
3433 }
3435 LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
3436 if (x->type() == voidType) {
3437 set_no_result(x);
3438 } else {
3439 __ move(result, rlock_result(x));
3440 }
3441 }
3443 #ifdef ASSERT
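// Debug-only: emit a runtime check of an integer condition; lir_assert
// halts with the given message if the condition does not hold.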
3444 void LIRGenerator::do_Assert(Assert *x) {
3445 ValueTag tag = x->x()->type()->tag();
3446 If::Condition cond = x->cond();
3448 LIRItem xitem(x->x(), this);
3449 LIRItem yitem(x->y(), this);
3450 LIRItem* xin = &xitem;
3451 LIRItem* yin = &yitem;
3453 assert(tag == intTag, "Only integer assertions are valid!");
3455 xin->load_item();
3456 yin->dont_load_item();
3458 set_no_result(x);
3460 LIR_Opr left = xin->result();
3461 LIR_Opr right = yin->result();
3463 __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
3464 }
3465 #endif
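// Emit a range check elimination predicate. Three cases: if the operands
// are missing (or StressRangeCheckElimination forces it), the predicate is
// treated as always failing and we jump unconditionally to the
// PredicateFailedStub; if both operands are integer constants, the
// condition is evaluated at compile time and the jump is emitted only when
// it holds; otherwise the operands are compared at runtime and we branch
// to the stub when the condition holds.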
3467 void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
3470 Instruction *a = x->x();
3471 Instruction *b = x->y();
3472 if (!a || StressRangeCheckElimination) {
3473 assert(!b || StressRangeCheckElimination, "B must also be null");
3475 CodeEmitInfo *info = state_for(x, x->state());
3476 CodeStub* stub = new PredicateFailedStub(info);
3478 __ jump(stub);
3479 } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
3480 int a_int = a->type()->as_IntConstant()->value();
3481 int b_int = b->type()->as_IntConstant()->value();
3483 bool ok = false;
3485 switch(x->cond()) {
3486 case Instruction::eql: ok = (a_int == b_int); break;
3487 case Instruction::neq: ok = (a_int != b_int); break;
3488 case Instruction::lss: ok = (a_int < b_int); break;
3489 case Instruction::leq: ok = (a_int <= b_int); break;
3490 case Instruction::gtr: ok = (a_int > b_int); break;
3491 case Instruction::geq: ok = (a_int >= b_int); break;
3492 case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
3493 case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
3494 default: ShouldNotReachHere();
3495 }
3497 if (ok) {
3499 CodeEmitInfo *info = state_for(x, x->state());
3500 CodeStub* stub = new PredicateFailedStub(info);
3502 __ jump(stub);
3503 }
3504 } else {
3506 ValueTag tag = x->x()->type()->tag();
3507 If::Condition cond = x->cond();
3508 LIRItem xitem(x->x(), this);
3509 LIRItem yitem(x->y(), this);
3510 LIRItem* xin = &xitem;
3511 LIRItem* yin = &yitem;
3513 assert(tag == intTag, "Only integer deoptimizations are valid!");
3515 xin->load_item();
3516 yin->dont_load_item();
3517 set_no_result(x);
3519 LIR_Opr left = xin->result();
3520 LIR_Opr right = yin->result();
3522 CodeEmitInfo *info = state_for(x, x->state());
3523 CodeStub* stub = new PredicateFailedStub(info);
3525 __ cmp(lir_cond(cond), left, right);
3526 __ branch(lir_cond(cond), right->type(), stub);
3527 }
3528 }
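// Convenience wrappers around call_runtime() below that build the
// signature and argument list for one- and two-argument runtime calls.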
3531 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
3532 LIRItemList args(1);
3533 LIRItem value(arg1, this);
3534 args.append(&value);
3535 BasicTypeList signature;
3536 signature.append(as_BasicType(arg1->type()));
3538 return call_runtime(&signature, &args, entry, result_type, info);
3539 }
3542 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
3543 LIRItemList args(2);
3544 LIRItem value1(arg1, this);
3545 LIRItem value2(arg2, this);
3546 args.append(&value1);
3547 args.append(&value2);
3548 BasicTypeList signature;
3549 signature.append(as_BasicType(arg1->type()));
3550 signature.append(as_BasicType(arg2->type()));
3552 return call_runtime(&signature, &args, entry, result_type, info);
3553 }
3556 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
3557 address entry, ValueType* result_type, CodeEmitInfo* info) {
3558 // get a result register
3559 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3560 LIR_Opr result = LIR_OprFact::illegalOpr;
3561 if (result_type->tag() != voidTag) {
3562 result = new_register(result_type);
3563 phys_reg = result_register_for(result_type);
3564 }
3566 // move the arguments into the correct location
3567 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3568 assert(cc->length() == args->length(), "argument mismatch");
3569 for (int i = 0; i < args->length(); i++) {
3570 LIR_Opr arg = args->at(i);
3571 LIR_Opr loc = cc->at(i);
3572 if (loc->is_register()) {
3573 __ move(arg, loc);
3574 } else {
3575 LIR_Address* addr = loc->as_address_ptr();
3576 // if (!can_store_as_constant(arg)) {
3577 // LIR_Opr tmp = new_register(arg->type());
3578 // __ move(arg, tmp);
3579 // arg = tmp;
3580 // }
3581 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3582 __ unaligned_move(arg, addr);
3583 } else {
3584 __ move(arg, addr);
3585 }
3586 }
3587 }
3589 if (info) {
3590 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3591 } else {
3592 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3593 }
3594 if (result->is_valid()) {
3595 __ move(phys_reg, result);
3596 }
3597 return result;
3598 }
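// Variant of the above that takes LIRItems instead of already-loaded
// operands: register arguments are forced directly into their calling
// convention locations with load_item_force().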
3601 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
3602 address entry, ValueType* result_type, CodeEmitInfo* info) {
3603 // get a result register
3604 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3605 LIR_Opr result = LIR_OprFact::illegalOpr;
3606 if (result_type->tag() != voidTag) {
3607 result = new_register(result_type);
3608 phys_reg = result_register_for(result_type);
3609 }
3611 // move the arguments into the correct location
3612 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3614 assert(cc->length() == args->length(), "argument mismatch");
3615 for (int i = 0; i < args->length(); i++) {
3616 LIRItem* arg = args->at(i);
3617 LIR_Opr loc = cc->at(i);
3618 if (loc->is_register()) {
3619 arg->load_item_force(loc);
3620 } else {
3621 LIR_Address* addr = loc->as_address_ptr();
3622 arg->load_for_store(addr->type());
3623 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3624 __ unaligned_move(arg->result(), addr);
3625 } else {
3626 __ move(arg->result(), addr);
3627 }
3628 }
3629 }
3631 if (info) {
3632 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3633 } else {
3634 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3635 }
3636 if (result->is_valid()) {
3637 __ move(phys_reg, result);
3638 }
3639 return result;
3640 }
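// Emit the memory barrier corresponding to a MemBar instruction; no
// fence is needed on uniprocessors, so code is emitted only for MP.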
3642 void LIRGenerator::do_MemBar(MemBar* x) {
3643 if (os::is_MP()) {
3644 LIR_Code code = x->code();
3645 switch(code) {
3646 case lir_membar_acquire : __ membar_acquire(); break;
3647 case lir_membar_release : __ membar_release(); break;
3648 case lir_membar : __ membar(); break;
3649 case lir_membar_loadload : __ membar_loadload(); break;
3650 case lir_membar_storestore: __ membar_storestore(); break;
3651 case lir_membar_loadstore : __ membar_loadstore(); break;
3652 case lir_membar_storeload : __ membar_storeload(); break;
3653 default : ShouldNotReachHere(); break;
3654 }
3655 }
3656 }
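// A store into what may be a boolean[] must only write 0 or 1. Compute
// value & 1, then test the boolean diffbit of the array klass' layout
// helper at runtime and use a conditional move to select the masked
// value when the array turns out to be a boolean array.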
3658 LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
3659 if (x->check_boolean()) {
3660 LIR_Opr value_fixed = rlock_byte(T_BYTE);
3661 if (TwoOperandLIRForm) {
3662 __ move(value, value_fixed);
3663 __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
3664 } else {
3665 __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
3666 }
3667 LIR_Opr klass = new_register(T_METADATA);
3668 __ move(new LIR_Address(array, oopDesc::klass_offset_in_bytes(), T_ADDRESS), klass, null_check_info);
3669 null_check_info = NULL;
3670 LIR_Opr layout = new_register(T_INT);
3671 __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
3672 int diffbit = Klass::layout_helper_boolean_diffbit();
3673 __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
3674 __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
3675 __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
3676 value = value_fixed;
3677 }
3678 return value;
3679 }