src/share/vm/c1/c1_LIRGenerator.cpp

author:      iveresov
date:        Thu, 02 Dec 2010 17:21:12 -0800
changeset:   2349 (5ddfcf4b079e)
parent:      2344 (ac637b7220d1)
children:    2412 (037c727f35fb), 2443 (df307487d610)
permissions: -rw-r--r--

7003554: (tiered) assert(is_null_object() || handle() != NULL) failed: cannot embed null pointer
Summary: C1 with profiling doesn't check whether the MDO has really been allocated, which can silently fail if the perm gen is full. The solution is to check whether the allocation failed and bail out of inlining or compilation.
Reviewed-by: kvn, never
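A minimal sketch of the kind of guard this change describes (illustrative only, not the actual hunk from this changeset; the bailout message and surrounding context are invented): the profiling paths ask for the methodDataOop and bail out of the compilation or the inlining attempt instead of asserting when the perm-gen allocation has failed:

    ciMethodData* md = method->method_data_or_null();
    if (md == NULL) {
      // MDO allocation failed (e.g. perm gen full): abort this compile or
      // inlining attempt instead of embedding a NULL oop constant.
      bailout("MDO allocation failed");
      return;
    }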

     1 /*
     2  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "c1/c1_Compilation.hpp"
    27 #include "c1/c1_FrameMap.hpp"
    28 #include "c1/c1_Instruction.hpp"
    29 #include "c1/c1_LIRAssembler.hpp"
    30 #include "c1/c1_LIRGenerator.hpp"
    31 #include "c1/c1_ValueStack.hpp"
    32 #include "ci/ciArrayKlass.hpp"
    33 #include "ci/ciCPCache.hpp"
    34 #include "ci/ciInstance.hpp"
    35 #include "runtime/sharedRuntime.hpp"
    36 #include "runtime/stubRoutines.hpp"
    37 #include "utilities/bitMap.inline.hpp"
    38 #ifndef SERIALGC
    39 #include "gc_implementation/g1/heapRegion.hpp"
    40 #endif
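       // Note: throughout this file `__` expands to the LIR_List of the block
       // currently being generated, so `__ move(a, b)` appends a move to that
       // list (debug builds also record the emitting source file and line).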
    42 #ifdef ASSERT
    43 #define __ gen()->lir(__FILE__, __LINE__)->
    44 #else
    45 #define __ gen()->lir()->
    46 #endif
    48 // TODO: ARM - Use some recognizable constant which still fits architectural constraints
    49 #ifdef ARM
    50 #define PATCHED_ADDR  (204)
    51 #else
    52 #define PATCHED_ADDR  (max_jint)
    53 #endif
    55 void PhiResolverState::reset(int max_vregs) {
    56   // Initialize array sizes
    57   _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
    58   _virtual_operands.trunc_to(0);
    59   _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
    60   _other_operands.trunc_to(0);
    61   _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
    62   _vreg_table.trunc_to(0);
    63 }
    67 //--------------------------------------------------------------
    68 // PhiResolver
    70 // Resolves cycles:
    71 //
    72 //  r1 := r2  becomes  temp := r1
    73 //  r2 := r1           r1 := r2
    74 //                     r2 := temp
    75 // and orders moves:
    76 //
    77 //  r2 := r3  becomes  r1 := r2
    78 //  r1 := r2           r2 := r3
    80 PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
    81  : _gen(gen)
    82  , _state(gen->resolver_state())
    83  , _temp(LIR_OprFact::illegalOpr)
    84 {
    85   // reinitialize the shared state arrays
    86   _state.reset(max_vregs);
    87 }
    90 void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
    91   assert(src->is_valid(), "");
    92   assert(dest->is_valid(), "");
    93   __ move(src, dest);
    94 }
    97 void PhiResolver::move_temp_to(LIR_Opr dest) {
    98   assert(_temp->is_valid(), "");
    99   emit_move(_temp, dest);
   100   NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
   101 }
   104 void PhiResolver::move_to_temp(LIR_Opr src) {
   105   assert(_temp->is_illegal(), "");
   106   _temp = _gen->new_register(src->type());
   107   emit_move(src, _temp);
   108 }
   111 // Traverse assignment graph in depth first order and generate moves in post order
   112 // i.e. two assignments: b := c, a := b start with node c:
   113 // Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
   114 // Generates moves in this order: move b to a and move c to b
   115 // i.e. cycle a := b, b := a start with node a
   116 // Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
   117 // Generates moves in this order: move b to temp, move a to b, move temp to a
   118 void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
   119   if (!dest->visited()) {
   120     dest->set_visited();
   121     for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
   122       move(dest, dest->destination_at(i));
   123     }
   124   } else if (!dest->start_node()) {
   125     // cycle in graph detected
   126     assert(_loop == NULL, "only one loop valid!");
   127     _loop = dest;
   128     move_to_temp(src->operand());
   129     return;
   130   } // else dest is a start node
   132   if (!dest->assigned()) {
   133     if (_loop == dest) {
   134       move_temp_to(dest->operand());
   135       dest->set_assigned();
   136     } else if (src != NULL) {
   137       emit_move(src->operand(), dest->operand());
   138       dest->set_assigned();
   139     }
   140   }
   141 }
   144 PhiResolver::~PhiResolver() {
   145   int i;
   146   // resolve any cycles in moves from and to virtual registers
   147   for (i = virtual_operands().length() - 1; i >= 0; i --) {
   148     ResolveNode* node = virtual_operands()[i];
   149     if (!node->visited()) {
   150       _loop = NULL;
   151       move(NULL, node);
   152       node->set_start_node();
   153       assert(_temp->is_illegal(), "move_temp_to() call missing");
   154     }
   155   }
   157   // generate moves from non-virtual registers to arbitrary destinations
   158   for (i = other_operands().length() - 1; i >= 0; i --) {
   159     ResolveNode* node = other_operands()[i];
   160     for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
   161       emit_move(node->operand(), node->destination_at(j)->operand());
   162     }
   163   }
   164 }
   167 ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
   168   ResolveNode* node;
   169   if (opr->is_virtual()) {
   170     int vreg_num = opr->vreg_number();
   171     node = vreg_table().at_grow(vreg_num, NULL);
   172     assert(node == NULL || node->operand() == opr, "");
   173     if (node == NULL) {
   174       node = new ResolveNode(opr);
   175       vreg_table()[vreg_num] = node;
   176     }
   177     // Make sure that all virtual operands show up in the list when
   178     // they are used as the source of a move.
   179     if (source && !virtual_operands().contains(node)) {
   180       virtual_operands().append(node);
   181     }
   182   } else {
   183     assert(source, "");
   184     node = new ResolveNode(opr);
   185     other_operands().append(node);
   186   }
   187   return node;
   188 }
   191 void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
   192   assert(dest->is_virtual(), "");
   193   // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
   194   assert(src->is_valid(), "");
   195   assert(dest->is_valid(), "");
   196   ResolveNode* source = source_node(src);
   197   source->append(destination_node(dest));
   198 }
   201 //--------------------------------------------------------------
   202 // LIRItem
   204 void LIRItem::set_result(LIR_Opr opr) {
   205   assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
   206   value()->set_operand(opr);
   208   if (opr->is_virtual()) {
   209     _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
   210   }
   212   _result = opr;
   213 }
   215 void LIRItem::load_item() {
   216   if (result()->is_illegal()) {
   217     // update the item's result
   218     _result = value()->operand();
   219   }
   220   if (!result()->is_register()) {
   221     LIR_Opr reg = _gen->new_register(value()->type());
   222     __ move(result(), reg);
   223     if (result()->is_constant()) {
   224       _result = reg;
   225     } else {
   226       set_result(reg);
   227     }
   228   }
   229 }
   232 void LIRItem::load_for_store(BasicType type) {
   233   if (_gen->can_store_as_constant(value(), type)) {
   234     _result = value()->operand();
   235     if (!_result->is_constant()) {
   236       _result = LIR_OprFact::value_type(value()->type());
   237     }
   238   } else if (type == T_BYTE || type == T_BOOLEAN) {
   239     load_byte_item();
   240   } else {
   241     load_item();
   242   }
   243 }
   245 void LIRItem::load_item_force(LIR_Opr reg) {
   246   LIR_Opr r = result();
   247   if (r != reg) {
   248 #if !defined(ARM) && !defined(E500V2)
   249     if (r->type() != reg->type()) {
   250       // moves between different types need an intervening spill slot
   251       r = _gen->force_to_spill(r, reg->type());
   252     }
   253 #endif
   254     __ move(r, reg);
   255     _result = reg;
   256   }
   257 }
   259 ciObject* LIRItem::get_jobject_constant() const {
   260   ObjectType* oc = type()->as_ObjectType();
   261   if (oc) {
   262     return oc->constant_value();
   263   }
   264   return NULL;
   265 }
   268 jint LIRItem::get_jint_constant() const {
   269   assert(is_constant() && value() != NULL, "");
   270   assert(type()->as_IntConstant() != NULL, "type check");
   271   return type()->as_IntConstant()->value();
   272 }
   275 jint LIRItem::get_address_constant() const {
   276   assert(is_constant() && value() != NULL, "");
   277   assert(type()->as_AddressConstant() != NULL, "type check");
   278   return type()->as_AddressConstant()->value();
   279 }
   282 jfloat LIRItem::get_jfloat_constant() const {
   283   assert(is_constant() && value() != NULL, "");
   284   assert(type()->as_FloatConstant() != NULL, "type check");
   285   return type()->as_FloatConstant()->value();
   286 }
   289 jdouble LIRItem::get_jdouble_constant() const {
   290   assert(is_constant() && value() != NULL, "");
   291   assert(type()->as_DoubleConstant() != NULL, "type check");
   292   return type()->as_DoubleConstant()->value();
   293 }
   296 jlong LIRItem::get_jlong_constant() const {
   297   assert(is_constant() && value() != NULL, "");
   298   assert(type()->as_LongConstant() != NULL, "type check");
   299   return type()->as_LongConstant()->value();
   300 }
   304 //--------------------------------------------------------------
   307 void LIRGenerator::init() {
   308   _bs = Universe::heap()->barrier_set();
   309 }
   312 void LIRGenerator::block_do_prolog(BlockBegin* block) {
   313 #ifndef PRODUCT
   314   if (PrintIRWithLIR) {
   315     block->print();
   316   }
   317 #endif
   319   // set up the list of LIR instructions
   320   assert(block->lir() == NULL, "LIR list already computed for this block");
   321   _lir = new LIR_List(compilation(), block);
   322   block->set_lir(_lir);
   324   __ branch_destination(block->label());
   326   if (LIRTraceExecution &&
   327       Compilation::current()->hir()->start()->block_id() != block->block_id() &&
   328       !block->is_set(BlockBegin::exception_entry_flag)) {
   329     assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
   330     trace_block_entry(block);
   331   }
   332 }
   335 void LIRGenerator::block_do_epilog(BlockBegin* block) {
   336 #ifndef PRODUCT
   337   if (PrintIRWithLIR) {
   338     tty->cr();
   339   }
   340 #endif
   342   // LIR_Opr for unpinned constants shouldn't be referenced by other
   343   // blocks so clear them out after processing the block.
   344   for (int i = 0; i < _unpinned_constants.length(); i++) {
   345     _unpinned_constants.at(i)->clear_operand();
   346   }
   347   _unpinned_constants.trunc_to(0);
   349   // clear out any registers used for other local constants
   350   _constants.trunc_to(0);
   351   _reg_for_constants.trunc_to(0);
   352 }
   355 void LIRGenerator::block_do(BlockBegin* block) {
   356   CHECK_BAILOUT();
   358   block_do_prolog(block);
   359   set_block(block);
   361   for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
   362     if (instr->is_pinned()) do_root(instr);
   363   }
   365   set_block(NULL);
   366   block_do_epilog(block);
   367 }
   370 //-------------------------LIRGenerator-----------------------------
   372 // This is where the tree-walk starts; instr must be root;
   373 void LIRGenerator::do_root(Value instr) {
   374   CHECK_BAILOUT();
   376   InstructionMark im(compilation(), instr);
   378   assert(instr->is_pinned(), "use only with roots");
   379   assert(instr->subst() == instr, "shouldn't have missed substitution");
   381   instr->visit(this);
   383   assert(!instr->has_uses() || instr->operand()->is_valid() ||
   384          instr->as_Constant() != NULL || bailed_out(), "invalid item set");
   385 }
   388 // This is called for each node in tree; the walk stops if a root is reached
   389 void LIRGenerator::walk(Value instr) {
   390   InstructionMark im(compilation(), instr);
   391   // stop the walk when we encounter a root
   392   if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
   393     assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
   394   } else {
   395     assert(instr->subst() == instr, "shouldn't have missed substitution");
   396     instr->visit(this);
   397     // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
   398   }
   399 }
   402 CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
   403   assert(state != NULL, "state must be defined");
   405   ValueStack* s = state;
   406   for_each_state(s) {
   407     if (s->kind() == ValueStack::EmptyExceptionState) {
   408       assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
   409       continue;
   410     }
   412     int index;
   413     Value value;
   414     for_each_stack_value(s, index, value) {
   415       assert(value->subst() == value, "missed substitution");
   416       if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
   417         walk(value);
   418         assert(value->operand()->is_valid(), "must be evaluated now");
   419       }
   420     }
   422     int bci = s->bci();
   423     IRScope* scope = s->scope();
   424     ciMethod* method = scope->method();
   426     MethodLivenessResult liveness = method->liveness_at_bci(bci);
   427     if (bci == SynchronizationEntryBCI) {
   428       if (x->as_ExceptionObject() || x->as_Throw()) {
   429         // all locals are dead on exit from the synthetic unlocker
   430         liveness.clear();
   431       } else {
   432         assert(x->as_MonitorEnter(), "only other case is MonitorEnter");
   433       }
   434     }
   435     if (!liveness.is_valid()) {
   436       // Degenerate or breakpointed method.
   437       bailout("Degenerate or breakpointed method");
   438     } else {
   439       assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
   440       for_each_local_value(s, index, value) {
   441         assert(value->subst() == value, "missed substitution");
   442         if (liveness.at(index) && !value->type()->is_illegal()) {
   443           if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
   444             walk(value);
   445             assert(value->operand()->is_valid(), "must be evaluated now");
   446           }
   447         } else {
   448           // NULL out this local so that linear scan can assume that all non-NULL values are live.
   449           s->invalidate_local(index);
   450         }
   451       }
   452     }
   453   }
   455   return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers());
   456 }
   459 CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
   460   return state_for(x, x->exception_state());
   461 }
   464 void LIRGenerator::jobject2reg_with_patching(LIR_Opr r, ciObject* obj, CodeEmitInfo* info) {
   465   if (!obj->is_loaded() || PatchALot) {
   466     assert(info != NULL, "info must be set if class is not loaded");
   467     __ oop2reg_patch(NULL, r, info);
   468   } else {
   469     // no patching needed
   470     __ oop2reg(obj->constant_encoding(), r);
   471   }
   472 }
   475 void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
   476                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
   477   CodeStub* stub = new RangeCheckStub(range_check_info, index);
   478   if (index->is_constant()) {
   479     cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
   480                 index->as_jint(), null_check_info);
   481     __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
   482   } else {
   483     cmp_reg_mem(lir_cond_aboveEqual, index, array,
   484                 arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
   485     __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
   486   }
   487 }
   490 void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
   491   CodeStub* stub = new RangeCheckStub(info, index, true);
   492   if (index->is_constant()) {
   493     cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
   494     __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
   495   } else {
   496     cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
   497                 java_nio_Buffer::limit_offset(), T_INT, info);
   498     __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
   499   }
   500   __ move(index, result);
   501 }
   505 void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
   506   LIR_Opr result_op = result;
   507   LIR_Opr left_op   = left;
   508   LIR_Opr right_op  = right;
   510   if (TwoOperandLIRForm && left_op != result_op) {
   511     assert(right_op != result_op, "malformed");
   512     __ move(left_op, result_op);
   513     left_op = result_op;
   514   }
   516   switch(code) {
   517     case Bytecodes::_dadd:
   518     case Bytecodes::_fadd:
   519     case Bytecodes::_ladd:
   520     case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
   521     case Bytecodes::_fmul:
   522     case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;
   524     case Bytecodes::_dmul:
   525       {
   526         if (is_strictfp) {
   527           __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
   528         } else {
   529           __ mul(left_op, right_op, result_op); break;
   530         }
   531       }
   532       break;
   534     case Bytecodes::_imul:
   535       {
   536         bool    did_strength_reduce = false;
   538         if (right->is_constant()) {
   539           int c = right->as_jint();
   540           if (is_power_of_2(c)) {
   541             // do not need tmp here
   542             __ shift_left(left_op, exact_log2(c), result_op);
   543             did_strength_reduce = true;
   544           } else {
   545             did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
   546           }
   547         }
   548         // we couldn't strength reduce so just emit the multiply
   549         if (!did_strength_reduce) {
   550           __ mul(left_op, right_op, result_op);
   551         }
   552       }
   553       break;
   555     case Bytecodes::_dsub:
   556     case Bytecodes::_fsub:
   557     case Bytecodes::_lsub:
   558     case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;
   560     case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
   561     // ldiv and lrem are implemented with a direct runtime call
   563     case Bytecodes::_ddiv:
   564       {
   565         if (is_strictfp) {
   566           __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
   567         } else {
   568           __ div (left_op, right_op, result_op); break;
   569         }
   570       }
   571       break;
   573     case Bytecodes::_drem:
   574     case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;
   576     default: ShouldNotReachHere();
   577   }
   578 }
   581 void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
   582   arithmetic_op(code, result, left, right, false, tmp);
   583 }
   586 void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
   587   arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
   588 }
   591 void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
   592   arithmetic_op(code, result, left, right, is_strictfp, tmp);
   593 }
   596 void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
   597   if (TwoOperandLIRForm && value != result_op) {
   598     assert(count != result_op, "malformed");
   599     __ move(value, result_op);
   600     value = result_op;
   601   }
   603   assert(count->is_constant() || count->is_register(), "must be");
   604   switch(code) {
   605   case Bytecodes::_ishl:
   606   case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
   607   case Bytecodes::_ishr:
   608   case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
   609   case Bytecodes::_iushr:
   610   case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
   611   default: ShouldNotReachHere();
   612   }
   613 }
   616 void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
   617   if (TwoOperandLIRForm && left_op != result_op) {
   618     assert(right_op != result_op, "malformed");
   619     __ move(left_op, result_op);
   620     left_op = result_op;
   621   }
   623   switch(code) {
   624     case Bytecodes::_iand:
   625     case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;
   627     case Bytecodes::_ior:
   628     case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;
   630     case Bytecodes::_ixor:
   631     case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;
   633     default: ShouldNotReachHere();
   634   }
   635 }
   638 void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
   639   if (!GenerateSynchronizationCode) return;
   640   // for slow path, use debug info for state after successful locking
   641   CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
   642   __ load_stack_address_monitor(monitor_no, lock);
   643   // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
   644   __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
   645 }
   648 void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
   649   if (!GenerateSynchronizationCode) return;
   650   // setup registers
   651   LIR_Opr hdr = lock;
   652   lock = new_hdr;
   653   CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
   654   __ load_stack_address_monitor(monitor_no, lock);
   655   __ unlock_object(hdr, object, lock, scratch, slow_path);
   656 }
   659 void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
   660   jobject2reg_with_patching(klass_reg, klass, info);
   661   // If klass is not loaded we do not know if the klass has finalizers:
   662   if (UseFastNewInstance && klass->is_loaded()
   663       && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
   665     Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;
   667     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
   669     assert(klass->is_loaded(), "must be loaded");
   670     // allocate space for instance
   671     assert(klass->size_helper() >= 0, "illegal instance size");
   672     const int instance_size = align_object_size(klass->size_helper());
   673     __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
   674                        oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
   675   } else {
   676     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
   677     __ branch(lir_cond_always, T_ILLEGAL, slow_path);
   678     __ branch_destination(slow_path->continuation());
   679   }
   680 }
   683 static bool is_constant_zero(Instruction* inst) {
   684   IntConstant* c = inst->type()->as_IntConstant();
   685   if (c) {
   686     return (c->value() == 0);
   687   }
   688   return false;
   689 }
   692 static bool positive_constant(Instruction* inst) {
   693   IntConstant* c = inst->type()->as_IntConstant();
   694   if (c) {
   695     return (c->value() >= 0);
   696   }
   697   return false;
   698 }
   701 static ciArrayKlass* as_array_klass(ciType* type) {
   702   if (type != NULL && type->is_array_klass() && type->is_loaded()) {
   703     return (ciArrayKlass*)type;
   704   } else {
   705     return NULL;
   706   }
   707 }
   709 void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
   710   Instruction* src     = x->argument_at(0);
   711   Instruction* src_pos = x->argument_at(1);
   712   Instruction* dst     = x->argument_at(2);
   713   Instruction* dst_pos = x->argument_at(3);
   714   Instruction* length  = x->argument_at(4);
   716   // first try to identify the likely type of the arrays involved
   717   ciArrayKlass* expected_type = NULL;
   718   bool is_exact = false;
   719   {
   720     ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
   721     ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
   722     ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
   723     ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
   724     if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
   725       // the types exactly match so the type is fully known
   726       is_exact = true;
   727       expected_type = src_exact_type;
   728     } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
   729       ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
   730       ciArrayKlass* src_type = NULL;
   731       if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
   732         src_type = (ciArrayKlass*) src_exact_type;
   733       } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
   734         src_type = (ciArrayKlass*) src_declared_type;
   735       }
   736       if (src_type != NULL) {
   737         if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
   738           is_exact = true;
   739           expected_type = dst_type;
   740         }
   741       }
   742     }
   743     // at least pass along a good guess
   744     if (expected_type == NULL) expected_type = dst_exact_type;
   745     if (expected_type == NULL) expected_type = src_declared_type;
   746     if (expected_type == NULL) expected_type = dst_declared_type;
   747   }
   749   // if a probable array type has been identified, figure out if any
   750   // of the required checks for a fast case can be elided.
   751   int flags = LIR_OpArrayCopy::all_flags;
   752   if (expected_type != NULL) {
   753     // try to skip null checks
   754     if (src->as_NewArray() != NULL)
   755       flags &= ~LIR_OpArrayCopy::src_null_check;
   756     if (dst->as_NewArray() != NULL)
   757       flags &= ~LIR_OpArrayCopy::dst_null_check;
   759     // check from incoming constant values
   760     if (positive_constant(src_pos))
   761       flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
   762     if (positive_constant(dst_pos))
   763       flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
   764     if (positive_constant(length))
   765       flags &= ~LIR_OpArrayCopy::length_positive_check;
   767     // see if the range check can be elided, which might also imply
   768     // that src or dst is non-null.
   769     ArrayLength* al = length->as_ArrayLength();
   770     if (al != NULL) {
   771       if (al->array() == src) {
   772         // it's the length of the source array
   773         flags &= ~LIR_OpArrayCopy::length_positive_check;
   774         flags &= ~LIR_OpArrayCopy::src_null_check;
   775         if (is_constant_zero(src_pos))
   776           flags &= ~LIR_OpArrayCopy::src_range_check;
   777       }
   778       if (al->array() == dst) {
   779         // it's the length of the destination array
   780         flags &= ~LIR_OpArrayCopy::length_positive_check;
   781         flags &= ~LIR_OpArrayCopy::dst_null_check;
   782         if (is_constant_zero(dst_pos))
   783           flags &= ~LIR_OpArrayCopy::dst_range_check;
   784       }
   785     }
   786     if (is_exact) {
   787       flags &= ~LIR_OpArrayCopy::type_check;
   788     }
   789   }
   791   if (src == dst) {
   792     // moving within a single array so no type checks are needed
   793     if (flags & LIR_OpArrayCopy::type_check) {
   794       flags &= ~LIR_OpArrayCopy::type_check;
   795     }
   796   }
   797   *flagsp = flags;
   798   *expected_typep = (ciArrayKlass*)expected_type;
   799 }
   802 LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
   803   assert(opr->is_register(), "why spill if item is not register?");
   805   if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
   806     LIR_Opr result = new_register(T_FLOAT);
   807     set_vreg_flag(result, must_start_in_memory);
   808     assert(opr->is_register(), "only a register can be spilled");
   809     assert(opr->value_type()->is_float(), "rounding only for floats available");
   810     __ roundfp(opr, LIR_OprFact::illegalOpr, result);
   811     return result;
   812   }
   813   return opr;
   814 }
   817 LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
   818   assert(type2size[t] == type2size[value->type()], "size mismatch");
   819   if (!value->is_register()) {
   820     // force into a register
   821     LIR_Opr r = new_register(value->type());
   822     __ move(value, r);
   823     value = r;
   824   }
   826   // create a spill location
   827   LIR_Opr tmp = new_register(t);
   828   set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);
   830   // move from register to spill
   831   __ move(value, tmp);
   832   return tmp;
   833 }
   835 void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
   836   if (if_instr->should_profile()) {
   837     ciMethod* method = if_instr->profiled_method();
   838     assert(method != NULL, "method should be set if branch is profiled");
   839     ciMethodData* md = method->method_data_or_null();
   840     assert(md != NULL, "Sanity");
   841     ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
   842     assert(data != NULL, "must have profiling data");
   843     assert(data->is_BranchData(), "need BranchData for two-way branches");
   844     int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
   845     int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
   846     if (if_instr->is_swapped()) {
   847       int t = taken_count_offset;
   848       taken_count_offset = not_taken_count_offset;
   849       not_taken_count_offset = t;
   850     }
   852     LIR_Opr md_reg = new_register(T_OBJECT);
   853     __ oop2reg(md->constant_encoding(), md_reg);
   855     LIR_Opr data_offset_reg = new_pointer_register();
   856     __ cmove(lir_cond(cond),
   857              LIR_OprFact::intptrConst(taken_count_offset),
   858              LIR_OprFact::intptrConst(not_taken_count_offset),
   859              data_offset_reg);
   861     // MDO cells are intptr_t, so the data_reg width is arch-dependent.
   862     LIR_Opr data_reg = new_pointer_register();
   863     LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
   864     __ move(data_addr, data_reg);
   865     // Use leal instead of add to avoid destroying condition codes on x86
   866     LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
   867     __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
   868     __ move(data_reg, data_addr);
   869   }
   870 }
   872 // Phi technique:
   873 // This is about passing live values from one basic block to the other.
   874 // In code generated from Java it is rather rare that more than one
   875 // value is on the stack from one basic block to the other.
   876 // We optimize our technique for efficient passing of one value
   877 // (of type long, int, double, ...) but it can be extended.
   878 // When entering or leaving a basic block, all registers and all spill
   879 // slots are released and empty. We use the released registers
   880 // and spill slots to pass the live values from one block
   881 // to the other. The topmost value, i.e., the value on TOS of the expression
   882 // stack, is passed in registers. All other values are stored in the spilling
   883 // area. Every Phi has an index which designates its spill slot.
   884 // At exit of a basic block, we fill the register(s) and spill slots.
   885 // At entry of a basic block, the block_prolog sets up the content of phi nodes
   886 // and locks necessary registers and spilling slots.
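       // (Illustrative example, not from the original source: for Java code like
       //  v = cond ? a : b; use(v);  the merge block starts with a Phi for v, and
       //  move_to_phi() below makes each predecessor move its own version of v
       //  into the operand assigned to that Phi, so the value arrives in the
       //  agreed register or spill slot.)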
   889 // move current value to referenced phi function
   890 void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
   891   Phi* phi = sux_val->as_Phi();
   892   // cur_val can be null without phi being null in conjunction with inlining
   893   if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
   894     LIR_Opr operand = cur_val->operand();
   895     if (cur_val->operand()->is_illegal()) {
   896       assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
   897              "these can be produced lazily");
   898       operand = operand_for_instruction(cur_val);
   899     }
   900     resolver->move(operand, operand_for_instruction(phi));
   901   }
   902 }
   905 // Moves all stack values into their PHI position
   906 void LIRGenerator::move_to_phi(ValueStack* cur_state) {
   907   BlockBegin* bb = block();
   908   if (bb->number_of_sux() == 1) {
   909     BlockBegin* sux = bb->sux_at(0);
   910     assert(sux->number_of_preds() > 0, "invalid CFG");
   912     // a block with only one predecessor never has phi functions
   913     if (sux->number_of_preds() > 1) {
   914       int max_phis = cur_state->stack_size() + cur_state->locals_size();
   915       PhiResolver resolver(this, _virtual_register_number + max_phis * 2);
   917       ValueStack* sux_state = sux->state();
   918       Value sux_value;
   919       int index;
   921       assert(cur_state->scope() == sux_state->scope(), "not matching");
   922       assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
   923       assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");
   925       for_each_stack_value(sux_state, index, sux_value) {
   926         move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
   927       }
   929       for_each_local_value(sux_state, index, sux_value) {
   930         move_to_phi(&resolver, cur_state->local_at(index), sux_value);
   931       }
   933       assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
   934     }
   935   }
   936 }
   939 LIR_Opr LIRGenerator::new_register(BasicType type) {
   940   int vreg = _virtual_register_number;
   941   // add a little fudge factor for the bailout, since the bailout is
   942   // only checked periodically.  This gives a few extra registers to
   943   // hand out before we really run out, which helps us keep from
   944   // tripping over assertions.
   945   if (vreg + 20 >= LIR_OprDesc::vreg_max) {
   946     bailout("out of virtual registers");
   947     if (vreg + 2 >= LIR_OprDesc::vreg_max) {
   948       // wrap it around
   949       _virtual_register_number = LIR_OprDesc::vreg_base;
   950     }
   951   }
   952   _virtual_register_number += 1;
   953   return LIR_OprFact::virtual_register(vreg, type);
   954 }
   957 // Try to lock using register in hint
   958 LIR_Opr LIRGenerator::rlock(Value instr) {
   959   return new_register(instr->type());
   960 }
   963 // does an rlock and sets result
   964 LIR_Opr LIRGenerator::rlock_result(Value x) {
   965   LIR_Opr reg = rlock(x);
   966   set_result(x, reg);
   967   return reg;
   968 }
   971 // does an rlock and sets result
   972 LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
   973   LIR_Opr reg;
   974   switch (type) {
   975   case T_BYTE:
   976   case T_BOOLEAN:
   977     reg = rlock_byte(type);
   978     break;
   979   default:
   980     reg = rlock(x);
   981     break;
   982   }
   984   set_result(x, reg);
   985   return reg;
   986 }
   989 //---------------------------------------------------------------------
   990 ciObject* LIRGenerator::get_jobject_constant(Value value) {
   991   ObjectType* oc = value->type()->as_ObjectType();
   992   if (oc) {
   993     return oc->constant_value();
   994   }
   995   return NULL;
   996 }
   999 void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  1000   assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  1001   assert(block()->next() == x, "ExceptionObject must be first instruction of block");
  1003   // no moves are created for phi functions at the beginning of exception
  1004   // handlers, so assign operands manually here
  1005   for_each_phi_fun(block(), phi,
  1006                    operand_for_instruction(phi));
  1008   LIR_Opr thread_reg = getThreadPointer();
  1009   __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
  1010                exceptionOopOpr());
  1011   __ move_wide(LIR_OprFact::oopConst(NULL),
  1012                new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  1013   __ move_wide(LIR_OprFact::oopConst(NULL),
  1014                new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));
  1016   LIR_Opr result = new_register(T_OBJECT);
  1017   __ move(exceptionOopOpr(), result);
  1018   set_result(x, result);
  1019 }
  1022 //----------------------------------------------------------------------
  1023 //----------------------------------------------------------------------
  1024 //----------------------------------------------------------------------
  1025 //----------------------------------------------------------------------
  1026 //                        visitor functions
  1027 //----------------------------------------------------------------------
  1028 //----------------------------------------------------------------------
  1029 //----------------------------------------------------------------------
  1030 //----------------------------------------------------------------------
  1032 void LIRGenerator::do_Phi(Phi* x) {
  1033   // phi functions are never visited directly
  1034   ShouldNotReachHere();
  1035 }
  1038 // Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
  1039 void LIRGenerator::do_Constant(Constant* x) {
  1040   if (x->state_before() != NULL) {
  1041     // Any constant with a ValueStack requires patching so emit the patch here
  1042     LIR_Opr reg = rlock_result(x);
  1043     CodeEmitInfo* info = state_for(x, x->state_before());
  1044     __ oop2reg_patch(NULL, reg, info);
  1045   } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
  1046     if (!x->is_pinned()) {
  1047       // unpinned constants are handled specially so that they can be
  1048       // put into registers when they are used multiple times within a
  1049       // block.  After the block completes their operand will be
  1050       // cleared so that other blocks can't refer to that register.
  1051       set_result(x, load_constant(x));
  1052     } else {
  1053       LIR_Opr res = x->operand();
  1054       if (!res->is_valid()) {
  1055         res = LIR_OprFact::value_type(x->type());
  1056       }
  1057       if (res->is_constant()) {
  1058         LIR_Opr reg = rlock_result(x);
  1059         __ move(res, reg);
  1060       } else {
  1061         set_result(x, res);
  1062       }
  1063     }
  1064   } else {
  1065     set_result(x, LIR_OprFact::value_type(x->type()));
  1066   }
  1067 }
  1070 void LIRGenerator::do_Local(Local* x) {
  1071   // operand_for_instruction has the side effect of setting the result
  1072   // so there's no need to do it here.
  1073   operand_for_instruction(x);
  1074 }
  1077 void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  1078   Unimplemented();
  1079 }
  1082 void LIRGenerator::do_Return(Return* x) {
  1083   if (compilation()->env()->dtrace_method_probes()) {
  1084     BasicTypeList signature;
  1085     signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
  1086     signature.append(T_OBJECT); // methodOop
  1087     LIR_OprList* args = new LIR_OprList();
  1088     args->append(getThreadPointer());
  1089     LIR_Opr meth = new_register(T_OBJECT);
  1090     __ oop2reg(method()->constant_encoding(), meth);
  1091     args->append(meth);
  1092     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  1093   }
  1095   if (x->type()->is_void()) {
  1096     __ return_op(LIR_OprFact::illegalOpr);
  1097   } else {
  1098     LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
  1099     LIRItem result(x->result(), this);
  1101     result.load_item_force(reg);
  1102     __ return_op(result.result());
  1103   }
  1104   set_no_result(x);
  1105 }
  1108 // Example: object.getClass ()
  1109 void LIRGenerator::do_getClass(Intrinsic* x) {
  1110   assert(x->number_of_arguments() == 1, "wrong type");
  1112   LIRItem rcvr(x->argument_at(0), this);
  1113   rcvr.load_item();
  1114   LIR_Opr result = rlock_result(x);
  1116   // need to perform the null check on the rcvr
  1117   CodeEmitInfo* info = NULL;
  1118   if (x->needs_null_check()) {
  1119     info = state_for(x);
  1120   }
  1121   __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
  1122   __ move_wide(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
  1123                                klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
  1124 }
  1127 // Example: Thread.currentThread()
  1128 void LIRGenerator::do_currentThread(Intrinsic* x) {
  1129   assert(x->number_of_arguments() == 0, "wrong type");
  1130   LIR_Opr reg = rlock_result(x);
  1131   __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
  1132 }
  1135 void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  1136   assert(x->number_of_arguments() == 1, "wrong type");
  1137   LIRItem receiver(x->argument_at(0), this);
  1139   receiver.load_item();
  1140   BasicTypeList signature;
  1141   signature.append(T_OBJECT); // receiver
  1142   LIR_OprList* args = new LIR_OprList();
  1143   args->append(receiver.result());
  1144   CodeEmitInfo* info = state_for(x, x->state());
  1145   call_runtime(&signature, args,
  1146                CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
  1147                voidType, info);
  1149   set_no_result(x);
  1150 }
  1153 //------------------------local access--------------------------------------
  1155 LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  1156   if (x->operand()->is_illegal()) {
  1157     Constant* c = x->as_Constant();
  1158     if (c != NULL) {
  1159       x->set_operand(LIR_OprFact::value_type(c->type()));
  1160     } else {
  1161       assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
  1162       // allocate a virtual register for this local or phi
  1163       x->set_operand(rlock(x));
  1164       _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
  1165     }
  1166   }
  1167   return x->operand();
  1168 }
  1171 Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  1172   if (opr->is_virtual()) {
  1173     return instruction_for_vreg(opr->vreg_number());
  1174   }
  1175   return NULL;
  1176 }
  1179 Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  1180   if (reg_num < _instruction_for_operand.length()) {
  1181     return _instruction_for_operand.at(reg_num);
  1182   }
  1183   return NULL;
  1184 }
  1187 void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  1188   if (_vreg_flags.size_in_bits() == 0) {
  1189     BitMap2D temp(100, num_vreg_flags);
  1190     temp.clear();
  1191     _vreg_flags = temp;
  1192   }
  1193   _vreg_flags.at_put_grow(vreg_num, f, true);
  1194 }
  1196 bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  1197   if (!_vreg_flags.is_valid_index(vreg_num, f)) {
  1198     return false;
  1199   }
  1200   return _vreg_flags.at(vreg_num, f);
  1201 }
  1204 // Block local constant handling.  This code is useful for keeping
  1205 // unpinned constants and constants which aren't exposed in the IR in
  1206 // registers.  Unpinned Constant instructions have their operands
  1207 // cleared when the block is finished so that other blocks can't end
  1208 // up referring to their registers.
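       // (Illustrative note: several uses of the same constant within one block can
       //  share a single virtual register via load_constant(); the _constants /
       //  _reg_for_constants cache is cleared again in block_do_epilog().)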
  1210 LIR_Opr LIRGenerator::load_constant(Constant* x) {
  1211   assert(!x->is_pinned(), "only for unpinned constants");
  1212   _unpinned_constants.append(x);
  1213   return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
  1214 }
  1217 LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  1218   BasicType t = c->type();
  1219   for (int i = 0; i < _constants.length(); i++) {
  1220     LIR_Const* other = _constants.at(i);
  1221     if (t == other->type()) {
  1222       switch (t) {
  1223       case T_INT:
  1224       case T_FLOAT:
  1225         if (c->as_jint_bits() != other->as_jint_bits()) continue;
  1226         break;
  1227       case T_LONG:
  1228       case T_DOUBLE:
  1229         if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
  1230         if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
  1231         break;
  1232       case T_OBJECT:
  1233         if (c->as_jobject() != other->as_jobject()) continue;
  1234         break;
  1235       }
  1236       return _reg_for_constants.at(i);
  1237     }
  1238   }
  1240   LIR_Opr result = new_register(t);
  1241   __ move((LIR_Opr)c, result);
  1242   _constants.append(c);
  1243   _reg_for_constants.append(result);
  1244   return result;
  1245 }
  1247 // Various barriers
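       // (Note: the pre-barrier is only needed by SATB collectors such as G1, which
       //  must record the value about to be overwritten while marking is active; the
       //  post-barrier dirties the card / remembered set for the stored-to location.)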
  1249 void LIRGenerator::pre_barrier(LIR_Opr addr_opr, bool patch,  CodeEmitInfo* info) {
  1250   // Do the pre-write barrier, if any.
  1251   switch (_bs->kind()) {
  1252 #ifndef SERIALGC
  1253     case BarrierSet::G1SATBCT:
  1254     case BarrierSet::G1SATBCTLogging:
  1255       G1SATBCardTableModRef_pre_barrier(addr_opr, patch, info);
  1256       break;
  1257 #endif // SERIALGC
  1258     case BarrierSet::CardTableModRef:
  1259     case BarrierSet::CardTableExtension:
  1260       // No pre barriers
  1261       break;
  1262     case BarrierSet::ModRef:
  1263     case BarrierSet::Other:
  1264       // No pre barriers
  1265       break;
  1266     default      :
  1267       ShouldNotReachHere();
  1268   }
  1269 }
  1272 void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  1273   switch (_bs->kind()) {
  1274 #ifndef SERIALGC
  1275     case BarrierSet::G1SATBCT:
  1276     case BarrierSet::G1SATBCTLogging:
  1277       G1SATBCardTableModRef_post_barrier(addr,  new_val);
  1278       break;
  1279 #endif // SERIALGC
  1280     case BarrierSet::CardTableModRef:
  1281     case BarrierSet::CardTableExtension:
  1282       CardTableModRef_post_barrier(addr,  new_val);
  1283       break;
  1284     case BarrierSet::ModRef:
  1285     case BarrierSet::Other:
  1286       // No post barriers
  1287       break;
  1288     default      :
  1289       ShouldNotReachHere();
  1290   }
  1291 }
  1293 ////////////////////////////////////////////////////////////////////////
  1294 #ifndef SERIALGC
  1296 void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch,  CodeEmitInfo* info) {
  1297   if (G1DisablePreBarrier) return;
  1299   // First we test whether marking is in progress.
  1300   BasicType flag_type;
  1301   if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
  1302     flag_type = T_INT;
  1303   } else {
  1304     guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
  1305               "Assumption");
  1306     flag_type = T_BYTE;
  1307   }
  1308   LIR_Opr thrd = getThreadPointer();
  1309   LIR_Address* mark_active_flag_addr =
  1310     new LIR_Address(thrd,
  1311                     in_bytes(JavaThread::satb_mark_queue_offset() +
  1312                              PtrQueue::byte_offset_of_active()),
  1313                     flag_type);
  1314   // Read the marking-in-progress flag.
  1315   LIR_Opr flag_val = new_register(T_INT);
  1316   __ load(mark_active_flag_addr, flag_val);
  1318   LIR_PatchCode pre_val_patch_code =
  1319     patch ? lir_patch_normal : lir_patch_none;
  1321   LIR_Opr pre_val = new_register(T_OBJECT);
  1323   __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
  1324   if (!addr_opr->is_address()) {
  1325     assert(addr_opr->is_register(), "must be");
  1326     addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
  1327   }
  1328   CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code,
  1329                                         info);
  1330   __ branch(lir_cond_notEqual, T_INT, slow);
  1331   __ branch_destination(slow->continuation());
  1332 }
  1334 void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  1335   if (G1DisablePostBarrier) return;
  1337   // If the "new_val" is a constant NULL, no barrier is necessary.
  1338   if (new_val->is_constant() &&
  1339       new_val->as_constant_ptr()->as_jobject() == NULL) return;
  1341   if (!new_val->is_register()) {
  1342     LIR_Opr new_val_reg = new_register(T_OBJECT);
  1343     if (new_val->is_constant()) {
  1344       __ move(new_val, new_val_reg);
  1345     } else {
  1346       __ leal(new_val, new_val_reg);
  1347     }
  1348     new_val = new_val_reg;
  1349   }
  1350   assert(new_val->is_register(), "must be a register at this point");
  1352   if (addr->is_address()) {
  1353     LIR_Address* address = addr->as_address_ptr();
  1354     LIR_Opr ptr = new_register(T_OBJECT);
  1355     if (!address->index()->is_valid() && address->disp() == 0) {
  1356       __ move(address->base(), ptr);
  1357     } else {
  1358       assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
  1359       __ leal(addr, ptr);
  1360     }
  1361     addr = ptr;
  1362   }
  1363   assert(addr->is_register(), "must be a register at this point");
  1365   LIR_Opr xor_res = new_pointer_register();
  1366   LIR_Opr xor_shift_res = new_pointer_register();
  1367   if (TwoOperandLIRForm ) {
  1368     __ move(addr, xor_res);
  1369     __ logical_xor(xor_res, new_val, xor_res);
  1370     __ move(xor_res, xor_shift_res);
  1371     __ unsigned_shift_right(xor_shift_res,
  1372                             LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
  1373                             xor_shift_res,
  1374                             LIR_OprDesc::illegalOpr());
  1375   } else {
  1376     __ logical_xor(addr, new_val, xor_res);
  1377     __ unsigned_shift_right(xor_res,
  1378                             LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
  1379                             xor_shift_res,
  1380                             LIR_OprDesc::illegalOpr());
  1381   }
  1383   if (!new_val->is_register()) {
  1384     LIR_Opr new_val_reg = new_register(T_OBJECT);
  1385     __ leal(new_val, new_val_reg);
  1386     new_val = new_val_reg;
  1387   }
  1388   assert(new_val->is_register(), "must be a register at this point");
  1390   __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
  1392   CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  1393   __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
  1394   __ branch_destination(slow->continuation());
  1395 }
  1397 #endif // SERIALGC
  1398 ////////////////////////////////////////////////////////////////////////
  1400 void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  1402   assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
  1403   LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
  1404   if (addr->is_address()) {
  1405     LIR_Address* address = addr->as_address_ptr();
  1406     LIR_Opr ptr = new_register(T_OBJECT);
  1407     if (!address->index()->is_valid() && address->disp() == 0) {
  1408       __ move(address->base(), ptr);
  1409     } else {
  1410       assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
  1411       __ leal(addr, ptr);
  1412     }
  1413     addr = ptr;
  1414   }
  1415   assert(addr->is_register(), "must be a register at this point");
  1417 #ifdef ARM
  1418   // TODO: ARM - move to platform-dependent code
  1419   LIR_Opr tmp = FrameMap::R14_opr;
  1420   if (VM_Version::supports_movw()) {
  1421     __ move((LIR_Opr)card_table_base, tmp);
  1422   } else {
  1423     __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
  1424   }
  1426   CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
  1427   LIR_Address *card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
  1428   if(((int)ct->byte_map_base & 0xff) == 0) {
  1429     __ move(tmp, card_addr);
  1430   } else {
  1431     LIR_Opr tmp_zero = new_register(T_INT);
  1432     __ move(LIR_OprFact::intConst(0), tmp_zero);
  1433     __ move(tmp_zero, card_addr);
  1434   }
  1435 #else // ARM
  1436   LIR_Opr tmp = new_pointer_register();
  1437   if (TwoOperandLIRForm) {
  1438     __ move(addr, tmp);
  1439     __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  1440   } else {
  1441     __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  1442   }
  1443   if (can_inline_as_constant(card_table_base)) {
  1444     __ move(LIR_OprFact::intConst(0),
  1445               new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
  1446   } else {
  1447     __ move(LIR_OprFact::intConst(0),
  1448               new LIR_Address(tmp, load_constant(card_table_base),
  1449                               T_BYTE));
  1450   }
  1451 #endif // ARM
  1452 }
  1455 //------------------------field access--------------------------------------
  1457 // Comment copied from templateTable_i486.cpp
  1458 // ----------------------------------------------------------------------------
  1459 // Volatile variables demand their effects be made known to all CPUs in
  1460 // order.  Store buffers on most chips allow reads & writes to reorder; the
  1461 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
  1462 // memory barrier (i.e., it's not sufficient that the interpreter does not
  1463 // reorder volatile references, the hardware also must not reorder them).
  1464 //
  1465 // According to the new Java Memory Model (JMM):
  1466 // (1) All volatiles are serialized with respect to each other.
  1467 // ALSO reads & writes act as acquire & release, so:
  1468 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
  1469 // the read float up to before the read.  It's OK for non-volatile memory refs
  1470 // that happen before the volatile read to float down below it.
  1471 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
  1472 // that happen BEFORE the write float down to after the write.  It's OK for
  1473 // non-volatile memory refs that happen after the volatile write to float up
  1474 // before it.
  1475 //
  1476 // We only put in barriers around volatile refs (they are expensive), not
  1477 // _between_ memory refs (that would require us to track the flavor of the
  1478 // previous memory refs).  Requirements (2) and (3) require some barriers
  1479 // before volatile stores and after volatile loads.  These nearly cover
  1480 // requirement (1) but miss the volatile-store-volatile-load case.  This final
  1481 // case is placed after volatile-stores although it could just as well go
  1482 // before volatile-loads.
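       //
       // A rough, illustrative sketch (not emitted verbatim) of what
       // do_StoreField/do_LoadField below produce for a volatile field on an
       // MP system:
       //
       //   membar_release();   // before the volatile store, for (3)
       //   <store field>
       //   membar();           // full fence after the store, covers (1)
       //
       //   <load field>
       //   membar_acquire();   // after the volatile load, for (2)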
  1485 void LIRGenerator::do_StoreField(StoreField* x) {
  1486   bool needs_patching = x->needs_patching();
  1487   bool is_volatile = x->field()->is_volatile();
  1488   BasicType field_type = x->field_type();
  1489   bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
  1491   CodeEmitInfo* info = NULL;
  1492   if (needs_patching) {
  1493     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
  1494     info = state_for(x, x->state_before());
  1495   } else if (x->needs_null_check()) {
  1496     NullCheck* nc = x->explicit_null_check();
  1497     if (nc == NULL) {
  1498       info = state_for(x);
  1499     } else {
  1500       info = state_for(nc);
  1505   LIRItem object(x->obj(), this);
  1506   LIRItem value(x->value(),  this);
  1508   object.load_item();
  1510   if (is_volatile || needs_patching) {
  1511     // load item if field is volatile (fewer special cases for volatiles)
  1512     // load item if field not initialized
  1513     // load item if field not constant
  1514     // because of code patching we cannot inline constants
  1515     if (field_type == T_BYTE || field_type == T_BOOLEAN) {
  1516       value.load_byte_item();
  1517     } else  {
  1518       value.load_item();
  1520   } else {
  1521     value.load_for_store(field_type);
  1524   set_no_result(x);
  1526 #ifndef PRODUCT
  1527   if (PrintNotLoaded && needs_patching) {
  1528     tty->print_cr("   ###class not loaded at store_%s bci %d",
  1529                   x->is_static() ?  "static" : "field", x->printable_bci());
  1531 #endif
  1533   if (x->needs_null_check() &&
  1534       (needs_patching ||
  1535        MacroAssembler::needs_explicit_null_check(x->offset()))) {
  1536     // emit an explicit null check because the offset is too large
  1537     __ null_check(object.result(), new CodeEmitInfo(info));
  1540   LIR_Address* address;
  1541   if (needs_patching) {
  1542     // we need to patch the offset in the instruction so don't allow
  1543     // generate_address to try to be smart about emitting the -1.
  1544     // Otherwise the patching code won't know how to find the
  1545     // instruction to patch.
  1546     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
  1547   } else {
  1548     address = generate_address(object.result(), x->offset(), field_type);
  1551   if (is_volatile && os::is_MP()) {
  1552     __ membar_release();
  1555   if (is_oop) {
  1556     // Do the pre-write barrier, if any.
  1557     pre_barrier(LIR_OprFact::address(address),
  1558                 needs_patching,
  1559                 (info ? new CodeEmitInfo(info) : NULL));
  1562   if (is_volatile) {
  1563     assert(!needs_patching && x->is_loaded(),
  1564            "how do we know it's volatile if it's not loaded");
  1565     volatile_field_store(value.result(), address, info);
  1566   } else {
  1567     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
  1568     __ store(value.result(), address, info, patch_code);
  1571   if (is_oop) {
  1572     // Store to object so mark the card of the header
  1573     post_barrier(object.result(), value.result());
  1576   if (is_volatile && os::is_MP()) {
  1577     __ membar();
  1582 void LIRGenerator::do_LoadField(LoadField* x) {
  1583   bool needs_patching = x->needs_patching();
  1584   bool is_volatile = x->field()->is_volatile();
  1585   BasicType field_type = x->field_type();
  1587   CodeEmitInfo* info = NULL;
  1588   if (needs_patching) {
  1589     assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
  1590     info = state_for(x, x->state_before());
  1591   } else if (x->needs_null_check()) {
  1592     NullCheck* nc = x->explicit_null_check();
  1593     if (nc == NULL) {
  1594       info = state_for(x);
  1595     } else {
  1596       info = state_for(nc);
  1600   LIRItem object(x->obj(), this);
  1602   object.load_item();
  1604 #ifndef PRODUCT
  1605   if (PrintNotLoaded && needs_patching) {
  1606     tty->print_cr("   ###class not loaded at load_%s bci %d",
  1607                   x->is_static() ?  "static" : "field", x->printable_bci());
  1609 #endif
  1611   if (x->needs_null_check() &&
  1612       (needs_patching ||
  1613        MacroAssembler::needs_explicit_null_check(x->offset()))) {
  1614     // emit an explicit null check because the offset is too large
  1615     __ null_check(object.result(), new CodeEmitInfo(info));
  1618   LIR_Opr reg = rlock_result(x, field_type);
  1619   LIR_Address* address;
  1620   if (needs_patching) {
  1621     // we need to patch the offset in the instruction so don't allow
  1622     // generate_address to try to be smart about emitting the -1.
  1623     // Otherwise the patching code won't know how to find the
  1624     // instruction to patch.
  1625     address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
  1626   } else {
  1627     address = generate_address(object.result(), x->offset(), field_type);
  1630   if (is_volatile) {
  1631     assert(!needs_patching && x->is_loaded(),
  1632            "how do we know it's volatile if it's not loaded");
  1633     volatile_field_load(address, reg, info);
  1634   } else {
  1635     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
  1636     __ load(address, reg, info, patch_code);
  1639   if (is_volatile && os::is_MP()) {
  1640     __ membar_acquire();
  1645 //------------------------java.nio.Buffer.checkIndex------------------------
  1647 // int java.nio.Buffer.checkIndex(int)
  1648 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
  1649   // NOTE: by the time we are in checkIndex() we are guaranteed that
  1650   // the buffer is non-null (because checkIndex is package-private and
  1651   // only called from within other methods in the buffer).
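         // Roughly, the code below emits the equivalent of (sketch only, not
         // actual LIR):
         //
         //   if (index >=u buf.limit) goto RangeCheckStub;  // unsigned compare
         //   result = index;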
  1652   assert(x->number_of_arguments() == 2, "wrong type");
  1653   LIRItem buf  (x->argument_at(0), this);
  1654   LIRItem index(x->argument_at(1), this);
  1655   buf.load_item();
  1656   index.load_item();
  1658   LIR_Opr result = rlock_result(x);
  1659   if (GenerateRangeChecks) {
  1660     CodeEmitInfo* info = state_for(x);
  1661     CodeStub* stub = new RangeCheckStub(info, index.result(), true);
  1662     if (index.result()->is_constant()) {
  1663       cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
  1664       __ branch(lir_cond_belowEqual, T_INT, stub);
  1665     } else {
  1666       cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
  1667                   java_nio_Buffer::limit_offset(), T_INT, info);
  1668       __ branch(lir_cond_aboveEqual, T_INT, stub);
  1670     __ move(index.result(), result);
  1671   } else {
  1672     // Just load the index into the result register
  1673     __ move(index.result(), result);
  1678 //------------------------array access--------------------------------------
  1681 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
  1682   LIRItem array(x->array(), this);
  1683   array.load_item();
  1684   LIR_Opr reg = rlock_result(x);
  1686   CodeEmitInfo* info = NULL;
  1687   if (x->needs_null_check()) {
  1688     NullCheck* nc = x->explicit_null_check();
  1689     if (nc == NULL) {
  1690       info = state_for(x);
  1691     } else {
  1692       info = state_for(nc);
  1695   __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
  1699 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
  1700   bool use_length = x->length() != NULL;
  1701   LIRItem array(x->array(), this);
  1702   LIRItem index(x->index(), this);
  1703   LIRItem length(this);
  1704   bool needs_range_check = true;
  1706   if (use_length) {
  1707     needs_range_check = x->compute_needs_range_check();
  1708     if (needs_range_check) {
  1709       length.set_instruction(x->length());
  1710       length.load_item();
  1714   array.load_item();
  1715   if (index.is_constant() && can_inline_as_constant(x->index())) {
  1716     // let it be a constant
  1717     index.dont_load_item();
  1718   } else {
  1719     index.load_item();
  1722   CodeEmitInfo* range_check_info = state_for(x);
  1723   CodeEmitInfo* null_check_info = NULL;
  1724   if (x->needs_null_check()) {
  1725     NullCheck* nc = x->explicit_null_check();
  1726     if (nc != NULL) {
  1727       null_check_info = state_for(nc);
  1728     } else {
  1729       null_check_info = range_check_info;
  1733   // emit array address setup early so it schedules better
  1734   LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
  1736   if (GenerateRangeChecks && needs_range_check) {
  1737     if (use_length) {
  1738       // TODO: use a (modified) version of array_range_check that does not require a
  1739       //       constant length to be loaded to a register
  1740       __ cmp(lir_cond_belowEqual, length.result(), index.result());
  1741       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
  1742     } else {
  1743       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
  1744       // The range check performs the null check, so clear it out for the load
  1745       null_check_info = NULL;
  1749   __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
  1753 void LIRGenerator::do_NullCheck(NullCheck* x) {
  1754   if (x->can_trap()) {
  1755     LIRItem value(x->obj(), this);
  1756     value.load_item();
  1757     CodeEmitInfo* info = state_for(x);
  1758     __ null_check(value.result(), info);
  1763 void LIRGenerator::do_Throw(Throw* x) {
  1764   LIRItem exception(x->exception(), this);
  1765   exception.load_item();
  1766   set_no_result(x);
  1767   LIR_Opr exception_opr = exception.result();
  1768   CodeEmitInfo* info = state_for(x, x->state());
  1770 #ifndef PRODUCT
  1771   if (PrintC1Statistics) {
  1772     increment_counter(Runtime1::throw_count_address(), T_INT);
  1774 #endif
  1776   // check if the instruction has an xhandler in any of the nested scopes
  1777   bool unwind = false;
  1778   if (info->exception_handlers()->length() == 0) {
  1779     // this throw is not inside an xhandler
  1780     unwind = true;
  1781   } else {
  1782     // get some idea of the throw type
  1783     bool type_is_exact = true;
  1784     ciType* throw_type = x->exception()->exact_type();
  1785     if (throw_type == NULL) {
  1786       type_is_exact = false;
  1787       throw_type = x->exception()->declared_type();
  1789     if (throw_type != NULL && throw_type->is_instance_klass()) {
  1790       ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
  1791       unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
  1795   // do null check before moving exception oop into fixed register
  1796   // to avoid a fixed interval with an oop during the null check.
  1797   // Use a copy of the CodeEmitInfo because debug information is
  1798   // different for null_check and throw.
  1799   if (GenerateCompilerNullChecks &&
  1800       (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
  1801     // if the exception object wasn't created using new then it might be null.
  1802     __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
  1805   if (compilation()->env()->jvmti_can_post_on_exceptions()) {
  1806     // we need to go through the exception lookup path to get JVMTI
  1807     // notification done
  1808     unwind = false;
  1811   // move exception oop into fixed register
  1812   __ move(exception_opr, exceptionOopOpr());
  1814   if (unwind) {
  1815     __ unwind_exception(exceptionOopOpr());
  1816   } else {
  1817     __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
  1822 void LIRGenerator::do_RoundFP(RoundFP* x) {
  1823   LIRItem input(x->input(), this);
  1824   input.load_item();
  1825   LIR_Opr input_opr = input.result();
  1826   assert(input_opr->is_register(), "why round if value is not in a register?");
  1827   assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
  1828   if (input_opr->is_single_fpu()) {
  1829     set_result(x, round_item(input_opr)); // This code path not currently taken
  1830   } else {
  1831     LIR_Opr result = new_register(T_DOUBLE);
  1832     set_vreg_flag(result, must_start_in_memory);
  1833     __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
  1834     set_result(x, result);
  1838 void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
  1839   LIRItem base(x->base(), this);
  1840   LIRItem idx(this);
  1842   base.load_item();
  1843   if (x->has_index()) {
  1844     idx.set_instruction(x->index());
  1845     idx.load_nonconstant();
  1848   LIR_Opr reg = rlock_result(x, x->basic_type());
  1850   int   log2_scale = 0;
  1851   if (x->has_index()) {
  1852     assert(x->index()->type()->tag() == intTag, "should not find non-int index");
  1853     log2_scale = x->log2_scale();
  1856   assert(!x->has_index() || idx.value() == x->index(), "should match");
  1858   LIR_Opr base_op = base.result();
  1859 #ifndef _LP64
  1860   if (x->base()->type()->tag() == longTag) {
  1861     base_op = new_register(T_INT);
  1862     __ convert(Bytecodes::_l2i, base.result(), base_op);
  1863   } else {
  1864     assert(x->base()->type()->tag() == intTag, "must be");
  1866 #endif
  1868   BasicType dst_type = x->basic_type();
  1869   LIR_Opr index_op = idx.result();
  1871   LIR_Address* addr;
  1872   if (index_op->is_constant()) {
  1873     assert(log2_scale == 0, "must not have a scale");
  1874     addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
  1875   } else {
  1876 #ifdef X86
  1877 #ifdef _LP64
  1878     if (!index_op->is_illegal() && index_op->type() == T_INT) {
  1879       LIR_Opr tmp = new_pointer_register();
  1880       __ convert(Bytecodes::_i2l, index_op, tmp);
  1881       index_op = tmp;
  1883 #endif
  1884     addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
  1885 #elif defined(ARM)
  1886     addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
  1887 #else
  1888     if (index_op->is_illegal() || log2_scale == 0) {
  1889 #ifdef _LP64
  1890       if (!index_op->is_illegal() && index_op->type() == T_INT) {
  1891         LIR_Opr tmp = new_pointer_register();
  1892         __ convert(Bytecodes::_i2l, index_op, tmp);
  1893         index_op = tmp;
  1895 #endif
  1896       addr = new LIR_Address(base_op, index_op, dst_type);
  1897     } else {
  1898       LIR_Opr tmp = new_pointer_register();
  1899       __ shift_left(index_op, log2_scale, tmp);
  1900       addr = new LIR_Address(base_op, tmp, dst_type);
  1902 #endif
  1905   if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
  1906     __ unaligned_move(addr, reg);
  1907   } else {
  1908     if (dst_type == T_OBJECT && x->is_wide()) {
  1909       __ move_wide(addr, reg);
  1910     } else {
  1911       __ move(addr, reg);
  1917 void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
  1918   int  log2_scale = 0;
  1919   BasicType type = x->basic_type();
  1921   if (x->has_index()) {
  1922     assert(x->index()->type()->tag() == intTag, "should not find non-int index");
  1923     log2_scale = x->log2_scale();
  1926   LIRItem base(x->base(), this);
  1927   LIRItem value(x->value(), this);
  1928   LIRItem idx(this);
  1930   base.load_item();
  1931   if (x->has_index()) {
  1932     idx.set_instruction(x->index());
  1933     idx.load_item();
  1936   if (type == T_BYTE || type == T_BOOLEAN) {
  1937     value.load_byte_item();
  1938   } else {
  1939     value.load_item();
  1942   set_no_result(x);
  1944   LIR_Opr base_op = base.result();
  1945 #ifndef _LP64
  1946   if (x->base()->type()->tag() == longTag) {
  1947     base_op = new_register(T_INT);
  1948     __ convert(Bytecodes::_l2i, base.result(), base_op);
  1949   } else {
  1950     assert(x->base()->type()->tag() == intTag, "must be");
  1952 #endif
  1954   LIR_Opr index_op = idx.result();
  1955   if (log2_scale != 0) {
  1956     // temporary fix (platform dependent code without shift on Intel would be better)
  1957     index_op = new_pointer_register();
  1958 #ifdef _LP64
  1959     if(idx.result()->type() == T_INT) {
  1960       __ convert(Bytecodes::_i2l, idx.result(), index_op);
  1961     } else {
  1962 #endif
  1963       // TODO: ARM also allows embedded shift in the address
  1964       __ move(idx.result(), index_op);
  1965 #ifdef _LP64
  1967 #endif
  1968     __ shift_left(index_op, log2_scale, index_op);
  1970 #ifdef _LP64
  1971   else if(!index_op->is_illegal() && index_op->type() == T_INT) {
  1972     LIR_Opr tmp = new_pointer_register();
  1973     __ convert(Bytecodes::_i2l, index_op, tmp);
  1974     index_op = tmp;
  1976 #endif
  1978   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
  1979   __ move(value.result(), addr);
  1983 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
  1984   BasicType type = x->basic_type();
  1985   LIRItem src(x->object(), this);
  1986   LIRItem off(x->offset(), this);
  1988   off.load_item();
  1989   src.load_item();
  1991   LIR_Opr reg = rlock_result(x, x->basic_type());
  1993   if (x->is_volatile() && os::is_MP()) __ membar_acquire();
  1994   get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
  1995   if (x->is_volatile() && os::is_MP()) __ membar();
  1999 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
  2000   BasicType type = x->basic_type();
  2001   LIRItem src(x->object(), this);
  2002   LIRItem off(x->offset(), this);
  2003   LIRItem data(x->value(), this);
  2005   src.load_item();
  2006   if (type == T_BOOLEAN || type == T_BYTE) {
  2007     data.load_byte_item();
  2008   } else {
  2009     data.load_item();
  2011   off.load_item();
  2013   set_no_result(x);
  2015   if (x->is_volatile() && os::is_MP()) __ membar_release();
  2016   put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
  2020 void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
  2021   LIRItem src(x->object(), this);
  2022   LIRItem off(x->offset(), this);
  2024   src.load_item();
  2025   if (off.is_constant() && can_inline_as_constant(x->offset())) {
  2026     // let it be a constant
  2027     off.dont_load_item();
  2028   } else {
  2029     off.load_item();
  2032   set_no_result(x);
  2034   LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
  2035   __ prefetch(addr, is_store);
  2039 void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
  2040   do_UnsafePrefetch(x, false);
  2044 void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
  2045   do_UnsafePrefetch(x, true);
  2049 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
  2050   int lng = x->length();
  2052   for (int i = 0; i < lng; i++) {
  2053     SwitchRange* one_range = x->at(i);
  2054     int low_key = one_range->low_key();
  2055     int high_key = one_range->high_key();
  2056     BlockBegin* dest = one_range->sux();
  2057     if (low_key == high_key) {
  2058       __ cmp(lir_cond_equal, value, low_key);
  2059       __ branch(lir_cond_equal, T_INT, dest);
  2060     } else if (high_key - low_key == 1) {
  2061       __ cmp(lir_cond_equal, value, low_key);
  2062       __ branch(lir_cond_equal, T_INT, dest);
  2063       __ cmp(lir_cond_equal, value, high_key);
  2064       __ branch(lir_cond_equal, T_INT, dest);
  2065     } else {
  2066       LabelObj* L = new LabelObj();
  2067       __ cmp(lir_cond_less, value, low_key);
  2068       __ branch(lir_cond_less, L->label());
  2069       __ cmp(lir_cond_lessEqual, value, high_key);
  2070       __ branch(lir_cond_lessEqual, T_INT, dest);
  2071       __ branch_destination(L->label());
  2074   __ jump(default_sux);
  2078 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
  2079   SwitchRangeList* res = new SwitchRangeList();
  2080   int len = x->length();
  2081   if (len > 0) {
  2082     BlockBegin* sux = x->sux_at(0);
  2083     int key = x->lo_key();
  2084     BlockBegin* default_sux = x->default_sux();
  2085     SwitchRange* range = new SwitchRange(key, sux);
  2086     for (int i = 0; i < len; i++, key++) {
  2087       BlockBegin* new_sux = x->sux_at(i);
  2088       if (sux == new_sux) {
  2089         // still in same range
  2090         range->set_high_key(key);
  2091       } else {
  2092         // skip tests which explicitly dispatch to the default
  2093         if (sux != default_sux) {
  2094           res->append(range);
  2096         range = new SwitchRange(key, new_sux);
  2098       sux = new_sux;
  2100     if (res->length() == 0 || res->last() != range)  res->append(range);
  2102   return res;
  2106 // we expect the keys to be sorted by increasing value
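       // For example (hypothetical keys): a lookupswitch with keys {1, 2, 3, 10}
       // dispatching to blocks {B1, B1, B1, B2} collapses into the ranges
       // [1,3] -> B1 and [10,10] -> B2; a range whose successor is the default
       // target is dropped, since falling through reaches the default anyway.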
  2107 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
  2108   SwitchRangeList* res = new SwitchRangeList();
  2109   int len = x->length();
  2110   if (len > 0) {
  2111     BlockBegin* default_sux = x->default_sux();
  2112     int key = x->key_at(0);
  2113     BlockBegin* sux = x->sux_at(0);
  2114     SwitchRange* range = new SwitchRange(key, sux);
  2115     for (int i = 1; i < len; i++) {
  2116       int new_key = x->key_at(i);
  2117       BlockBegin* new_sux = x->sux_at(i);
  2118       if (key+1 == new_key && sux == new_sux) {
  2119         // still in same range
  2120         range->set_high_key(new_key);
  2121       } else {
  2122         // skip tests which explicitly dispatch to the default
  2123         if (range->sux() != default_sux) {
  2124           res->append(range);
  2126         range = new SwitchRange(new_key, new_sux);
  2128       key = new_key;
  2129       sux = new_sux;
  2131     if (res->length() == 0 || res->last() != range)  res->append(range);
  2133   return res;
  2137 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
  2138   LIRItem tag(x->tag(), this);
  2139   tag.load_item();
  2140   set_no_result(x);
  2142   if (x->is_safepoint()) {
  2143     __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  2146   // move values into phi locations
  2147   move_to_phi(x->state());
  2149   int lo_key = x->lo_key();
  2150   int hi_key = x->hi_key();
  2151   int len = x->length();
  2152   LIR_Opr value = tag.result();
  2153   if (UseTableRanges) {
  2154     do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
  2155   } else {
  2156     for (int i = 0; i < len; i++) {
  2157       __ cmp(lir_cond_equal, value, i + lo_key);
  2158       __ branch(lir_cond_equal, T_INT, x->sux_at(i));
  2160     __ jump(x->default_sux());
  2165 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
  2166   LIRItem tag(x->tag(), this);
  2167   tag.load_item();
  2168   set_no_result(x);
  2170   if (x->is_safepoint()) {
  2171     __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  2174   // move values into phi locations
  2175   move_to_phi(x->state());
  2177   LIR_Opr value = tag.result();
  2178   if (UseTableRanges) {
  2179     do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
  2180   } else {
  2181     int len = x->length();
  2182     for (int i = 0; i < len; i++) {
  2183       __ cmp(lir_cond_equal, value, x->key_at(i));
  2184       __ branch(lir_cond_equal, T_INT, x->sux_at(i));
  2186     __ jump(x->default_sux());
  2191 void LIRGenerator::do_Goto(Goto* x) {
  2192   set_no_result(x);
  2194   if (block()->next()->as_OsrEntry()) {
  2195     // need to free up storage used for OSR entry point
  2196     LIR_Opr osrBuffer = block()->next()->operand();
  2197     BasicTypeList signature;
  2198     signature.append(T_INT);
  2199     CallingConvention* cc = frame_map()->c_calling_convention(&signature);
  2200     __ move(osrBuffer, cc->args()->at(0));
  2201     __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
  2202                          getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
  2205   if (x->is_safepoint()) {
  2206     ValueStack* state = x->state_before() ? x->state_before() : x->state();
  2208     // increment backedge counter if needed
  2209     CodeEmitInfo* info = state_for(x, state);
  2210     increment_backedge_counter(info, info->stack()->bci());
  2211     CodeEmitInfo* safepoint_info = state_for(x, state);
  2212     __ safepoint(safepoint_poll_register(), safepoint_info);
  2215   // Gotos can be folded Ifs; handle this case.
  2216   if (x->should_profile()) {
  2217     ciMethod* method = x->profiled_method();
  2218     assert(method != NULL, "method should be set if branch is profiled");
  2219     ciMethodData* md = method->method_data_or_null();
  2220     assert(md != NULL, "Sanity");
  2221     ciProfileData* data = md->bci_to_data(x->profiled_bci());
  2222     assert(data != NULL, "must have profiling data");
  2223     int offset;
  2224     if (x->direction() == Goto::taken) {
  2225       assert(data->is_BranchData(), "need BranchData for two-way branches");
  2226       offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
  2227     } else if (x->direction() == Goto::not_taken) {
  2228       assert(data->is_BranchData(), "need BranchData for two-way branches");
  2229       offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
  2230     } else {
  2231       assert(data->is_JumpData(), "need JumpData for branches");
  2232       offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
  2234     LIR_Opr md_reg = new_register(T_OBJECT);
  2235     __ oop2reg(md->constant_encoding(), md_reg);
  2237     increment_counter(new LIR_Address(md_reg, offset,
  2238                                       NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
  2241   // emit phi-instruction move after safepoint since this simplifies
  2242 // describing the state at the safepoint.
  2243   move_to_phi(x->state());
  2245   __ jump(x->default_sux());
  2249 void LIRGenerator::do_Base(Base* x) {
  2250   __ std_entry(LIR_OprFact::illegalOpr);
  2251   // Emit moves from physical registers / stack slots to virtual registers
  2252   CallingConvention* args = compilation()->frame_map()->incoming_arguments();
  2253   IRScope* irScope = compilation()->hir()->top_scope();
  2254   int java_index = 0;
  2255   for (int i = 0; i < args->length(); i++) {
  2256     LIR_Opr src = args->at(i);
  2257     assert(!src->is_illegal(), "check");
  2258     BasicType t = src->type();
  2260     // Types which are smaller than int are passed as int, so
  2261 // correct the type which is passed.
  2262     switch (t) {
  2263     case T_BYTE:
  2264     case T_BOOLEAN:
  2265     case T_SHORT:
  2266     case T_CHAR:
  2267       t = T_INT;
  2268       break;
  2271     LIR_Opr dest = new_register(t);
  2272     __ move(src, dest);
  2274     // Assign new location to Local instruction for this local
  2275     Local* local = x->state()->local_at(java_index)->as_Local();
  2276     assert(local != NULL, "Locals for incoming arguments must have been created");
  2277 #ifndef __SOFTFP__
  2278     // The java calling convention passes double as long and float as int.
  2279     assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
  2280 #endif // __SOFTFP__
  2281     local->set_operand(dest);
  2282     _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
  2283     java_index += type2size[t];
  2286   if (compilation()->env()->dtrace_method_probes()) {
  2287     BasicTypeList signature;
  2288     signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
  2289     signature.append(T_OBJECT); // methodOop
  2290     LIR_OprList* args = new LIR_OprList();
  2291     args->append(getThreadPointer());
  2292     LIR_Opr meth = new_register(T_OBJECT);
  2293     __ oop2reg(method()->constant_encoding(), meth);
  2294     args->append(meth);
  2295     call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
  2298   if (method()->is_synchronized()) {
  2299     LIR_Opr obj;
  2300     if (method()->is_static()) {
  2301       obj = new_register(T_OBJECT);
  2302       __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
  2303     } else {
  2304       Local* receiver = x->state()->local_at(0)->as_Local();
  2305       assert(receiver != NULL, "must already exist");
  2306       obj = receiver->operand();
  2308     assert(obj->is_valid(), "must be valid");
  2310     if (method()->is_synchronized() && GenerateSynchronizationCode) {
  2311       LIR_Opr lock = new_register(T_INT);
  2312       __ load_stack_address_monitor(0, lock);
  2314       CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
  2315       CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
  2317       // receiver is guaranteed non-NULL so don't need CodeEmitInfo
  2318       __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
  2322   // increment invocation counters if needed
  2323   if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
  2324     CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL);
  2325     increment_invocation_counter(info);
  2328   // all blocks with a successor must end with an unconditional jump
  2329   // to the successor even if they are consecutive
  2330   __ jump(x->default_sux());
  2334 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
  2335   // construct our frame and model the production of incoming pointer
  2336   // to the OSR buffer.
  2337   __ osr_entry(LIR_Assembler::osrBufferPointer());
  2338   LIR_Opr result = rlock_result(x);
  2339   __ move(LIR_Assembler::osrBufferPointer(), result);
  2343 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
  2344   int i = (x->has_receiver() || x->is_invokedynamic()) ? 1 : 0;
  2345   for (; i < args->length(); i++) {
  2346     LIRItem* param = args->at(i);
  2347     LIR_Opr loc = arg_list->at(i);
  2348     if (loc->is_register()) {
  2349       param->load_item_force(loc);
  2350     } else {
  2351       LIR_Address* addr = loc->as_address_ptr();
  2352       param->load_for_store(addr->type());
  2353       if (addr->type() == T_OBJECT) {
  2354         __ move_wide(param->result(), addr);
  2355       } else
  2356         if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
  2357           __ unaligned_move(param->result(), addr);
  2358         } else {
  2359           __ move(param->result(), addr);
  2364   if (x->has_receiver()) {
  2365     LIRItem* receiver = args->at(0);
  2366     LIR_Opr loc = arg_list->at(0);
  2367     if (loc->is_register()) {
  2368       receiver->load_item_force(loc);
  2369     } else {
  2370       assert(loc->is_address(), "just checking");
  2371       receiver->load_for_store(T_OBJECT);
  2372       __ move_wide(receiver->result(), loc->as_address_ptr());
  2378 // Visits all arguments, returns appropriate items without loading them
  2379 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
  2380   LIRItemList* argument_items = new LIRItemList();
  2381   if (x->has_receiver()) {
  2382     LIRItem* receiver = new LIRItem(x->receiver(), this);
  2383     argument_items->append(receiver);
  2385   if (x->is_invokedynamic()) {
  2386     // Insert a dummy for the synthetic MethodHandle argument.
  2387     argument_items->append(NULL);
  2389   int idx = x->has_receiver() ? 1 : 0;
  2390   for (int i = 0; i < x->number_of_arguments(); i++) {
  2391     LIRItem* param = new LIRItem(x->argument_at(i), this);
  2392     argument_items->append(param);
  2393     idx += (param->type()->is_double_word() ? 2 : 1);
  2395   return argument_items;
  2399 // The invoke with receiver has following phases:
  2400 //   a) traverse and load/lock receiver;
  2401 //   b) traverse all arguments -> item-array (invoke_visit_arguments)
  2402 //   c) push receiver on stack
  2403 //   d) load each of the items and push on stack
  2404 //   e) unlock receiver
  2405 //   f) move receiver into receiver-register %o0
  2406 //   g) lock result registers and emit call operation
  2407 //
  2408 // Before issuing a call, we must spill-save all values on stack
  2409 // that are in caller-save registers. "spill-save" moves those registers
  2410 // either into a free callee-save register or spills them if no free
  2411 // callee-save register is available.
  2412 //
  2413 // The problem is where to invoke spill-save.
  2414 // - if invoked between e) and f), we may lock a callee-save
  2415 //   register in "spill-save", destroying the receiver register
  2416 //   before f) is executed
  2417 // - if we rearrange f) to be earlier, by loading %o0, it
  2418 //   may destroy a value on the stack that is currently in %o0
  2419 //   and is waiting to be spilled
  2420 // - if we keep the receiver locked while doing spill-save,
  2421 //   we cannot spill it as it is spill-locked
  2422 //
  2423 void LIRGenerator::do_Invoke(Invoke* x) {
  2424   CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
  2426   LIR_OprList* arg_list = cc->args();
  2427   LIRItemList* args = invoke_visit_arguments(x);
  2428   LIR_Opr receiver = LIR_OprFact::illegalOpr;
  2430   // setup result register
  2431   LIR_Opr result_register = LIR_OprFact::illegalOpr;
  2432   if (x->type() != voidType) {
  2433     result_register = result_register_for(x->type());
  2436   CodeEmitInfo* info = state_for(x, x->state());
  2438   // invokedynamics can deoptimize.
  2439   CodeEmitInfo* deopt_info = x->is_invokedynamic() ? state_for(x, x->state_before()) : NULL;
  2441   invoke_load_arguments(x, args, arg_list);
  2443   if (x->has_receiver()) {
  2444     args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
  2445     receiver = args->at(0)->result();
  2448   // emit invoke code
  2449   bool optimized = x->target_is_loaded() && x->target_is_final();
  2450   assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
  2452   // JSR 292
  2453   // Preserve the SP over MethodHandle call sites.
  2454   ciMethod* target = x->target();
  2455   if (target->is_method_handle_invoke()) {
  2456     info->set_is_method_handle_invoke(true);
  2457     __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
  2460   switch (x->code()) {
  2461     case Bytecodes::_invokestatic:
  2462       __ call_static(target, result_register,
  2463                      SharedRuntime::get_resolve_static_call_stub(),
  2464                      arg_list, info);
  2465       break;
  2466     case Bytecodes::_invokespecial:
  2467     case Bytecodes::_invokevirtual:
  2468     case Bytecodes::_invokeinterface:
  2469       // for final target we still produce an inline cache, in order
  2470       // to be able to call mixed mode
  2471       if (x->code() == Bytecodes::_invokespecial || optimized) {
  2472         __ call_opt_virtual(target, receiver, result_register,
  2473                             SharedRuntime::get_resolve_opt_virtual_call_stub(),
  2474                             arg_list, info);
  2475       } else if (x->vtable_index() < 0) {
  2476         __ call_icvirtual(target, receiver, result_register,
  2477                           SharedRuntime::get_resolve_virtual_call_stub(),
  2478                           arg_list, info);
  2479       } else {
  2480         int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
  2481         int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
  2482         __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
  2484       break;
  2485     case Bytecodes::_invokedynamic: {
  2486       ciBytecodeStream bcs(x->scope()->method());
  2487       bcs.force_bci(x->state()->bci());
  2488       assert(bcs.cur_bc() == Bytecodes::_invokedynamic, "wrong stream");
  2489       ciCPCache* cpcache = bcs.get_cpcache();
  2491       // Get CallSite offset from constant pool cache pointer.
  2492       int index = bcs.get_method_index();
  2493       size_t call_site_offset = cpcache->get_f1_offset(index);
  2495       // If this invokedynamic call site hasn't been executed yet in
  2496       // the interpreter, the CallSite object in the constant pool
  2497       // cache is still null and we need to deoptimize.
  2498       if (cpcache->is_f1_null_at(index)) {
  2499         // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
  2500         // clone all handlers.  This is handled transparently in other
  2501         // places by the CodeEmitInfo cloning logic but is handled
  2502         // specially here because a stub isn't being used.
  2503         x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  2505         DeoptimizeStub* deopt_stub = new DeoptimizeStub(deopt_info);
  2506         __ jump(deopt_stub);
  2509       // Use the receiver register for the synthetic MethodHandle
  2510       // argument.
  2511       receiver = LIR_Assembler::receiverOpr();
  2512       LIR_Opr tmp = new_register(objectType);
  2514       // Load CallSite object from constant pool cache.
  2515       __ oop2reg(cpcache->constant_encoding(), tmp);
  2516       __ load(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp);
  2518       // Load target MethodHandle from CallSite object.
  2519       __ load(new LIR_Address(tmp, java_dyn_CallSite::target_offset_in_bytes(), T_OBJECT), receiver);
  2521       __ call_dynamic(target, receiver, result_register,
  2522                       SharedRuntime::get_resolve_opt_virtual_call_stub(),
  2523                       arg_list, info);
  2524       break;
  2526     default:
  2527       ShouldNotReachHere();
  2528       break;
  2531   // JSR 292
  2532   // Restore the SP after MethodHandle call sites.
  2533   if (target->is_method_handle_invoke()) {
  2534     __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
  2537   if (x->type()->is_float() || x->type()->is_double()) {
  2538     // Force rounding of results from non-strictfp when in strictfp
  2539     // scope (or when we don't know the strictness of the callee, to
  2540     // be safe.)
  2541     if (method()->is_strict()) {
  2542       if (!x->target_is_loaded() || !x->target_is_strictfp()) {
  2543         result_register = round_item(result_register);
  2548   if (result_register->is_valid()) {
  2549     LIR_Opr result = rlock_result(x);
  2550     __ move(result_register, result);
  2555 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
  2556   assert(x->number_of_arguments() == 1, "wrong type");
  2557   LIRItem value       (x->argument_at(0), this);
  2558   LIR_Opr reg = rlock_result(x);
  2559   value.load_item();
  2560   LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
  2561   __ move(tmp, reg);
  2566 // Code for:  x->x() {x->cond()} x->y() ? x->tval() : x->fval()
  2567 void LIRGenerator::do_IfOp(IfOp* x) {
  2568 #ifdef ASSERT
  2570     ValueTag xtag = x->x()->type()->tag();
  2571     ValueTag ttag = x->tval()->type()->tag();
  2572     assert(xtag == intTag || xtag == objectTag, "cannot handle others");
  2573     assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
  2574     assert(ttag == x->fval()->type()->tag(), "cannot handle others");
  2576 #endif
  2578   LIRItem left(x->x(), this);
  2579   LIRItem right(x->y(), this);
  2580   left.load_item();
  2581   if (can_inline_as_constant(right.value())) {
  2582     right.dont_load_item();
  2583   } else {
  2584     right.load_item();
  2587   LIRItem t_val(x->tval(), this);
  2588   LIRItem f_val(x->fval(), this);
  2589   t_val.dont_load_item();
  2590   f_val.dont_load_item();
  2591   LIR_Opr reg = rlock_result(x);
  2593   __ cmp(lir_cond(x->cond()), left.result(), right.result());
  2594   __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg);
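         // Illustrative example (hypothetical Java source): for
         //   int r = (a < b) ? x : y;
         // the code above emits, in essence,
         //   cmp(lir_cond_less, a, b);  cmove(lir_cond_less, x, y, r);
         // i.e. a branch-free conditional move instead of control flow.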
  2598 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
  2599   switch (x->id()) {
  2600   case vmIntrinsics::_intBitsToFloat      :
  2601   case vmIntrinsics::_doubleToRawLongBits :
  2602   case vmIntrinsics::_longBitsToDouble    :
  2603   case vmIntrinsics::_floatToRawIntBits   : {
  2604     do_FPIntrinsics(x);
  2605     break;
  2608   case vmIntrinsics::_currentTimeMillis: {
  2609     assert(x->number_of_arguments() == 0, "wrong type");
  2610     LIR_Opr reg = result_register_for(x->type());
  2611     __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeMillis), getThreadTemp(),
  2612                          reg, new LIR_OprList());
  2613     LIR_Opr result = rlock_result(x);
  2614     __ move(reg, result);
  2615     break;
  2618   case vmIntrinsics::_nanoTime: {
  2619     assert(x->number_of_arguments() == 0, "wrong type");
  2620     LIR_Opr reg = result_register_for(x->type());
  2621     __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeNanos), getThreadTemp(),
  2622                          reg, new LIR_OprList());
  2623     LIR_Opr result = rlock_result(x);
  2624     __ move(reg, result);
  2625     break;
  2628   case vmIntrinsics::_Object_init:    do_RegisterFinalizer(x); break;
  2629   case vmIntrinsics::_getClass:       do_getClass(x);      break;
  2630   case vmIntrinsics::_currentThread:  do_currentThread(x); break;
  2632   case vmIntrinsics::_dlog:           // fall through
  2633   case vmIntrinsics::_dlog10:         // fall through
  2634   case vmIntrinsics::_dabs:           // fall through
  2635   case vmIntrinsics::_dsqrt:          // fall through
  2636   case vmIntrinsics::_dtan:           // fall through
  2637   case vmIntrinsics::_dsin :          // fall through
  2638   case vmIntrinsics::_dcos :          do_MathIntrinsic(x); break;
  2639   case vmIntrinsics::_arraycopy:      do_ArrayCopy(x);     break;
  2641   // java.nio.Buffer.checkIndex
  2642   case vmIntrinsics::_checkIndex:     do_NIOCheckIndex(x); break;
  2644   case vmIntrinsics::_compareAndSwapObject:
  2645     do_CompareAndSwap(x, objectType);
  2646     break;
  2647   case vmIntrinsics::_compareAndSwapInt:
  2648     do_CompareAndSwap(x, intType);
  2649     break;
  2650   case vmIntrinsics::_compareAndSwapLong:
  2651     do_CompareAndSwap(x, longType);
  2652     break;
  2654     // sun.misc.AtomicLongCSImpl.attemptUpdate
  2655   case vmIntrinsics::_attemptUpdate:
  2656     do_AttemptUpdate(x);
  2657     break;
  2659   default: ShouldNotReachHere(); break;
  2663 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
  2664   // Need recv in a temporary register so it interferes with the other temporaries
  2665   LIR_Opr recv = LIR_OprFact::illegalOpr;
  2666   LIR_Opr mdo = new_register(T_OBJECT);
  2667   // tmp is used to hold the counters on SPARC
  2668   LIR_Opr tmp = new_pointer_register();
  2669   if (x->recv() != NULL) {
  2670     LIRItem value(x->recv(), this);
  2671     value.load_item();
  2672     recv = new_register(T_OBJECT);
  2673     __ move(value.result(), recv);
  2675   __ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder());
  2678 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
  2679   // We can safely ignore accessors here, since c2 will inline them anyway;
  2680   // accessors are also always mature.
  2681   if (!x->inlinee()->is_accessor()) {
  2682     CodeEmitInfo* info = state_for(x, x->state(), true);
  2683     // Increment the invocation counter; don't notify the runtime, because we don't inline loops.
  2684     increment_event_counter_impl(info, x->inlinee(), 0, InvocationEntryBci, false, false);
  2688 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
  2689   int freq_log;
  2690   int level = compilation()->env()->comp_level();
  2691   if (level == CompLevel_limited_profile) {
  2692     freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
  2693   } else if (level == CompLevel_full_profile) {
  2694     freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
  2695   } else {
  2696     ShouldNotReachHere();
  2698   // Increment the appropriate invocation/backedge counter and notify the runtime.
  2699   increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
  2702 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
  2703                                                 ciMethod *method, int frequency,
  2704                                                 int bci, bool backedge, bool notify) {
  2705   assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^x - 1 or 0");
  2706   int level = _compilation->env()->comp_level();
  2707   assert(level > CompLevel_simple, "Shouldn't be here");
  2709   int offset = -1;
  2710   LIR_Opr counter_holder = new_register(T_OBJECT);
  2711   LIR_Opr meth;
  2712   if (level == CompLevel_limited_profile) {
  2713     offset = in_bytes(backedge ? methodOopDesc::backedge_counter_offset() :
  2714                                  methodOopDesc::invocation_counter_offset());
  2715     __ oop2reg(method->constant_encoding(), counter_holder);
  2716     meth = counter_holder;
  2717   } else if (level == CompLevel_full_profile) {
  2718     offset = in_bytes(backedge ? methodDataOopDesc::backedge_counter_offset() :
  2719                                  methodDataOopDesc::invocation_counter_offset());
  2720     ciMethodData* md = method->method_data_or_null();
  2721     assert(md != NULL, "Sanity");
  2722     __ oop2reg(md->constant_encoding(), counter_holder);
  2723     meth = new_register(T_OBJECT);
  2724     __ oop2reg(method->constant_encoding(), meth);
  2725   } else {
  2726     ShouldNotReachHere();
  2728   LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
  2729   LIR_Opr result = new_register(T_INT);
  2730   __ load(counter, result);
  2731   __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
  2732   __ store(result, counter);
  2733   if (notify) {
  2734     LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
  2735     __ logical_and(result, mask, result);
  2736     __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
  2737     // The bci for info can point to the cmp; for ifs we want the if bci
  2738     CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
  2739     __ branch(lir_cond_equal, T_INT, overflow);
  2740     __ branch_destination(overflow->continuation());
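           // Worked example (illustrative): if freq_log were 10, the caller
           // passes frequency == (1 << 10) - 1 == 1023, so the test
           //   (counter & (1023 << count_shift)) == 0
           // succeeds once every 1024 increments and the CounterOverflowStub
           // (and hence the runtime notification) runs on every 1024th event.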
  2744 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
  2745   LIRItemList args(1);
  2746   LIRItem value(arg1, this);
  2747   args.append(&value);
  2748   BasicTypeList signature;
  2749   signature.append(as_BasicType(arg1->type()));
  2751   return call_runtime(&signature, &args, entry, result_type, info);
  2755 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
  2756   LIRItemList args(2);
  2757   LIRItem value1(arg1, this);
  2758   LIRItem value2(arg2, this);
  2759   args.append(&value1);
  2760   args.append(&value2);
  2761   BasicTypeList signature;
  2762   signature.append(as_BasicType(arg1->type()));
  2763   signature.append(as_BasicType(arg2->type()));
  2765   return call_runtime(&signature, &args, entry, result_type, info);
  2769 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
  2770                                    address entry, ValueType* result_type, CodeEmitInfo* info) {
  2771   // get a result register
  2772   LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  2773   LIR_Opr result = LIR_OprFact::illegalOpr;
  2774   if (result_type->tag() != voidTag) {
  2775     result = new_register(result_type);
  2776     phys_reg = result_register_for(result_type);
  2779   // move the arguments into the correct location
  2780   CallingConvention* cc = frame_map()->c_calling_convention(signature);
  2781   assert(cc->length() == args->length(), "argument mismatch");
  2782   for (int i = 0; i < args->length(); i++) {
  2783     LIR_Opr arg = args->at(i);
  2784     LIR_Opr loc = cc->at(i);
  2785     if (loc->is_register()) {
  2786       __ move(arg, loc);
  2787     } else {
  2788       LIR_Address* addr = loc->as_address_ptr();
  2789 //           if (!can_store_as_constant(arg)) {
  2790 //             LIR_Opr tmp = new_register(arg->type());
  2791 //             __ move(arg, tmp);
  2792 //             arg = tmp;
  2793 //           }
  2794       if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
  2795         __ unaligned_move(arg, addr);
  2796       } else {
  2797         __ move(arg, addr);
  2802   if (info) {
  2803     __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  2804   } else {
  2805     __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  2807   if (result->is_valid()) {
  2808     __ move(phys_reg, result);
  2810   return result;
  2814 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
  2815                                    address entry, ValueType* result_type, CodeEmitInfo* info) {
  2816   // get a result register
  2817   LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  2818   LIR_Opr result = LIR_OprFact::illegalOpr;
  2819   if (result_type->tag() != voidTag) {
  2820     result = new_register(result_type);
  2821     phys_reg = result_register_for(result_type);
  2824   // move the arguments into the correct location
  2825   CallingConvention* cc = frame_map()->c_calling_convention(signature);
  2827   assert(cc->length() == args->length(), "argument mismatch");
  2828   for (int i = 0; i < args->length(); i++) {
  2829     LIRItem* arg = args->at(i);
  2830     LIR_Opr loc = cc->at(i);
  2831     if (loc->is_register()) {
  2832       arg->load_item_force(loc);
  2833     } else {
  2834       LIR_Address* addr = loc->as_address_ptr();
  2835       arg->load_for_store(addr->type());
  2836       if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
  2837         __ unaligned_move(arg->result(), addr);
  2838       } else {
  2839         __ move(arg->result(), addr);
  2844   if (info) {
  2845     __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  2846   } else {
  2847     __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  2849   if (result->is_valid()) {
  2850     __ move(phys_reg, result);
  2852   return result;
