src/share/vm/c1/c1_LIRGenerator.cpp

changeset 435:a61af66fc99e (Sat Dec 01 00:00:00 2007 +0000; new file)
children 739:dc7f315e41f7, 777:37f87013dfd8
/*
 * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_c1_LIRGenerator.cpp.incl"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif


void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}



//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3
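//
// Typical usage (see move_to_phi() below): build one resolver per block,
// record each phi move, and let the destructor emit the moves in a safe
// order. A minimal sketch, with cur_opr/phi_opr standing in for operands
// already computed via operand_for_instruction():
//
//   PhiResolver resolver(this, max_vregs);
//   resolver.move(cur_opr, phi_opr);  // one call per phi input
//   // ~PhiResolver() orders the moves and breaks cycles through a temp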

PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}


// Traverse the assignment graph in depth-first order and generate moves in post order
// i.e. for the two assignments b := c, a := b, start with node c:
// Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
// Generates moves in this order: move b to a and move c to b
// i.e. for the cycle a := b, b := a, start with node a:
// Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
// Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}


PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}


ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      LIR_Opr tmp = _gen->force_to_spill(r, reg->type());
      __ move(tmp, reg);
    } else {
      __ move(r, reg);
    }
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}



//--------------------------------------------------------------


void LIRGenerator::init() {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

#ifdef _LP64
  _card_table_base = new LIR_Const((jlong)ct->byte_map_base);
#else
  _card_table_base = new LIR_Const((jint)ct->byte_map_base);
#endif
}


void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current_compilation()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}


void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}


void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}


//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be a root.
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}


// This is called for each node in the tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when we encounter a root
  if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  int index;
  Value value;
  for_each_stack_value(state, index, value) {
    assert(value->subst() == value, "missed substitution");
    if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
      walk(value);
      assert(value->operand()->is_valid(), "must be evaluated now");
    }
  }
  ValueStack* s = state;
  int bci = x->bci();
  for_each_state(s) {
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter(), "only other case is MonitorEnter");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
    bci = scope->caller_bci();
  }

  return new CodeEmitInfo(x->bci(), state, ignore_xhandler ? NULL : x->exception_handlers());
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->lock_stack());
}


void LIRGenerator::jobject2reg_with_patching(LIR_Opr r, ciObject* obj, CodeEmitInfo* info) {
  if (!obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ oop2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ oop2reg(obj->encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                    CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
}


void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
  __ move(index, result);
}


// increment a counter returning the incremented value
LIR_Opr LIRGenerator::increment_and_return_counter(LIR_Opr base, int offset, int increment) {
  LIR_Address* counter = new LIR_Address(base, offset, T_INT);
  LIR_Opr result = new_register(T_INT);
  __ load(counter, result);
  __ add(result, LIR_OprFact::intConst(increment), result);
  __ store(result, counter);
  return result;
}


void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool    did_strength_reduce = false;

        if (right->is_constant()) {
          int c = right->as_jint();
          if (is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        if (is_strictfp) {
          __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div (left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
  case Bytecodes::_ishl:
  case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
  case Bytecodes::_ishr:
  case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
  case Bytecodes::_iushr:
  case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
  default: ShouldNotReachHere();
  }
}


void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, slow_path);
}


void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  jobject2reg_with_patching(klass_reg, klass, info);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;
  if (expected_type != NULL) {
    // try to skip null checks
    if (src->as_NewArray() != NULL)
      flags &= ~LIR_OpArrayCopy::src_null_check;
    if (dst->as_NewArray() != NULL)
      flags &= ~LIR_OpArrayCopy::dst_null_check;

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}


LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()], "size mismatch");
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}


void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data();
    if (md == NULL) {
      bailout("out of memory building methodDataOop");
      return;
    }
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    LIR_Opr md_reg = new_register(T_OBJECT);
    __ move(LIR_OprFact::oopConst(md->encoding()), md_reg);
    LIR_Opr data_offset_reg = new_register(T_INT);
    __ cmove(lir_cond(cond),
             LIR_OprFact::intConst(taken_count_offset),
             LIR_OprFact::intConst(not_taken_count_offset),
             data_offset_reg);
    LIR_Opr data_reg = new_register(T_INT);
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, T_INT);
    __ move(LIR_OprFact::address(data_addr), data_reg);
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    // Use leal instead of add to avoid destroying condition codes on x86
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, LIR_OprFact::address(data_addr));
  }
}


// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated from Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double, ...) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At exit of a basic block, we fill the register(s) and spill slots.
// At entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.
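//
// Example: block B ends with value v on TOS and its successor S (which
// has more than one predecessor) starts with phi p for that stack slot.
// move_to_phi() below then records the move
//
//   operand_for_instruction(v) -> operand_for_instruction(p)
//
// with a PhiResolver, which emits the move (through a temp register if
// the phi moves form a cycle).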


// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    LIR_Opr operand = cur_val->operand();
    if (cur_val->operand()->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      // Inlining may cause the local state not to match up, so walk up
      // the caller state until we get to the same scope as the
      // successor and then start processing from there.
      while (cur_state->scope() != sux_state->scope()) {
        cur_state = cur_state->caller_state();
        assert(cur_state != NULL, "scopes don't match up");
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}


LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically.  This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  if (type == T_ADDRESS) type = T_INT;
  return LIR_OprFact::virtual_register(vreg, type);
}


// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }

  set_result(x, reg);
  return reg;
}


//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  __ move(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
          exceptionOopOpr());
  __ move(LIR_OprFact::oopConst(NULL),
          new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move(LIR_OprFact::oopConst(NULL),
          new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}


//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block.  After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}


void LIRGenerator::do_Return(Return* x) {
  if (DTraceMethodProbes) {
    BasicTypeList signature;
    signature.append(T_INT);    // thread
    signature.append(T_OBJECT); // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}


// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x, x->state()->copy_locks());
  }
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
  __ move(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
                          klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
}


// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ load(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}


void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}


//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}


Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}


void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}


// Block local constant handling.  This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers.  Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.
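//
// For example, an unpinned Constant used more than once is handled in
// do_Constant() above via set_result(x, load_constant(x)); load_constant()
// then reuses a register if the same constant bits were already
// materialized earlier in this block (see the _constants scan below).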

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}

// Various barriers

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (Universe::heap()->barrier_set()->kind()) {
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr, new_val);
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No post barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {

  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(sizeof(*((CardTableModRefBS*)bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)bs)->byte_map_base);
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_register(T_OBJECT);
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  }
  if (can_inline_as_constant(card_table_base)) {
    __ move(LIR_OprFact::intConst(0),
              new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
  } else {
    __ move(LIR_OprFact::intConst(0),
              new LIR_Address(tmp, load_constant(card_table_base),
                              T_BYTE));
  }
}
  1.1294 +
  1.1295 +
  1.1296 +//------------------------field access--------------------------------------
  1.1297 +
  1.1298 +// Comment copied form templateTable_i486.cpp
  1.1299 +// ----------------------------------------------------------------------------
  1.1300 +// Volatile variables demand their effects be made known to all CPU's in
  1.1301 +// order.  Store buffers on most chips allow reads & writes to reorder; the
  1.1302 +// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
  1.1303 +// memory barrier (i.e., it's not sufficient that the interpreter does not
  1.1304 +// reorder volatile references, the hardware also must not reorder them).
  1.1305 +//
  1.1306 +// According to the new Java Memory Model (JMM):
  1.1307 +// (1) All volatiles are serialized wrt to each other.
  1.1308 +// ALSO reads & writes act as aquire & release, so:
  1.1309 +// (2) A read cannot let unrelated NON-volatile memory refs that happen after
  1.1310 +// the read float up to before the read.  It's OK for non-volatile memory refs
  1.1311 +// that happen before the volatile read to float down below it.
  1.1312 +// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
  1.1313 +// that happen BEFORE the write float down to after the write.  It's OK for
  1.1314 +// non-volatile memory refs that happen after the volatile write to float up
  1.1315 +// before it.
  1.1316 +//
  1.1317 +// We only put in barriers around volatile refs (they are expensive), not
  1.1318 +// _between_ memory refs (that would require us to track the flavor of the
  1.1319 +// previous memory refs).  Requirements (2) and (3) require some barriers
  1.1320 +// before volatile stores and after volatile loads.  These nearly cover
  1.1321 +// requirement (1) but miss the volatile-store-volatile-load case.  This final
  1.1322 +// case is placed after volatile-stores although it could just as well go
  1.1323 +// before volatile-loads.
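// [Editorial illustration, mirroring do_StoreField/do_LoadField below] On an
// MP system a volatile field store is bracketed like this:
//
//   __ membar_release();                        // requirement (3)
//   volatile_field_store(value, address, info);
//   __ membar();                                // store-load half of (1)
//
// and a volatile field load is followed by __ membar_acquire(), which
// covers requirement (2).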
  1.1324 +
  1.1325 +
  1.1326 +void LIRGenerator::do_StoreField(StoreField* x) {
  1.1327 +  bool needs_patching = x->needs_patching();
  1.1328 +  bool is_volatile = x->field()->is_volatile();
  1.1329 +  BasicType field_type = x->field_type();
  1.1330 +  bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
  1.1331 +
  1.1332 +  CodeEmitInfo* info = NULL;
  1.1333 +  if (needs_patching) {
  1.1334 +    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
  1.1335 +    info = state_for(x, x->state_before());
  1.1336 +  } else if (x->needs_null_check()) {
  1.1337 +    NullCheck* nc = x->explicit_null_check();
  1.1338 +    if (nc == NULL) {
  1.1339 +      info = state_for(x, x->lock_stack());
  1.1340 +    } else {
  1.1341 +      info = state_for(nc);
  1.1342 +    }
  1.1343 +  }
  1.1344 +
  1.1345 +
  1.1346 +  LIRItem object(x->obj(), this);
  1.1347 +  LIRItem value(x->value(),  this);
  1.1348 +
  1.1349 +  object.load_item();
  1.1350 +
  1.1351 +  if (is_volatile || needs_patching) {
  1.1352 +    // Load the value into a register when the field is volatile
  1.1353 +    // (fewer special cases for volatiles) or when the access needs
  1.1354 +    // patching (the field is not yet initialized or not constant);
  1.1355 +    // code patching makes it impossible to inline the value as a constant.
  1.1356 +    if (field_type == T_BYTE || field_type == T_BOOLEAN) {
  1.1357 +      value.load_byte_item();
  1.1358 +    } else  {
  1.1359 +      value.load_item();
  1.1360 +    }
  1.1361 +  } else {
  1.1362 +    value.load_for_store(field_type);
  1.1363 +  }
  1.1364 +
  1.1365 +  set_no_result(x);
  1.1366 +
  1.1367 +  if (PrintNotLoaded && needs_patching) {
  1.1368 +    tty->print_cr("   ###class not loaded at store_%s bci %d",
  1.1369 +                  x->is_static() ?  "static" : "field", x->bci());
  1.1370 +  }
  1.1371 +
  1.1372 +  if (x->needs_null_check() &&
  1.1373 +      (needs_patching ||
  1.1374 +       MacroAssembler::needs_explicit_null_check(x->offset()))) {
  1.1375 +    // emit an explicit null check because the offset is too large
  1.1376 +    __ null_check(object.result(), new CodeEmitInfo(info));
  1.1377 +  }
  1.1378 +
  1.1379 +  LIR_Address* address;
  1.1380 +  if (needs_patching) {
  1.1381 +    // we need to patch the offset in the instruction, so don't let
  1.1382 +    // generate_address try to be smart about the max_jint displacement;
  1.1383 +    // otherwise the patching code won't know how to find the
  1.1384 +    // instruction to patch.
  1.1385 +    address = new LIR_Address(object.result(), max_jint, field_type);
  1.1386 +  } else {
  1.1387 +    address = generate_address(object.result(), x->offset(), field_type);
  1.1388 +  }
  1.1389 +
  1.1390 +  if (is_volatile && os::is_MP()) {
  1.1391 +    __ membar_release();
  1.1392 +  }
  1.1393 +
  1.1394 +  if (is_volatile) {
  1.1395 +    assert(!needs_patching && x->is_loaded(),
  1.1396 +           "how do we know it's volatile if it's not loaded");
  1.1397 +    volatile_field_store(value.result(), address, info);
  1.1398 +  } else {
  1.1399 +    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
  1.1400 +    __ store(value.result(), address, info, patch_code);
  1.1401 +  }
  1.1402 +
  1.1403 +  if (is_oop) {
  1.1404 +    post_barrier(object.result(), value.result());
  1.1405 +  }
  1.1406 +
  1.1407 +  if (is_volatile && os::is_MP()) {
  1.1408 +    __ membar();
  1.1409 +  }
  1.1410 +}
  1.1411 +
  1.1412 +
  1.1413 +void LIRGenerator::do_LoadField(LoadField* x) {
  1.1414 +  bool needs_patching = x->needs_patching();
  1.1415 +  bool is_volatile = x->field()->is_volatile();
  1.1416 +  BasicType field_type = x->field_type();
  1.1417 +
  1.1418 +  CodeEmitInfo* info = NULL;
  1.1419 +  if (needs_patching) {
  1.1420 +    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
  1.1421 +    info = state_for(x, x->state_before());
  1.1422 +  } else if (x->needs_null_check()) {
  1.1423 +    NullCheck* nc = x->explicit_null_check();
  1.1424 +    if (nc == NULL) {
  1.1425 +      info = state_for(x, x->lock_stack());
  1.1426 +    } else {
  1.1427 +      info = state_for(nc);
  1.1428 +    }
  1.1429 +  }
  1.1430 +
  1.1431 +  LIRItem object(x->obj(), this);
  1.1432 +
  1.1433 +  object.load_item();
  1.1434 +
  1.1435 +  if (PrintNotLoaded && needs_patching) {
  1.1436 +    tty->print_cr("   ###class not loaded at load_%s bci %d",
  1.1437 +                  x->is_static() ?  "static" : "field", x->bci());
  1.1438 +  }
  1.1439 +
  1.1440 +  if (x->needs_null_check() &&
  1.1441 +      (needs_patching ||
  1.1442 +       MacroAssembler::needs_explicit_null_check(x->offset()))) {
  1.1443 +    // emit an explicit null check because the offset is too large
  1.1444 +    __ null_check(object.result(), new CodeEmitInfo(info));
  1.1445 +  }
  1.1446 +
  1.1447 +  LIR_Opr reg = rlock_result(x, field_type);
  1.1448 +  LIR_Address* address;
  1.1449 +  if (needs_patching) {
  1.1450 +    // we need to patch the offset in the instruction, so don't let
  1.1451 +    // generate_address try to be smart about the max_jint displacement;
  1.1452 +    // otherwise the patching code won't know how to find the
  1.1453 +    // instruction to patch.
  1.1454 +    address = new LIR_Address(object.result(), max_jint, field_type);
  1.1455 +  } else {
  1.1456 +    address = generate_address(object.result(), x->offset(), field_type);
  1.1457 +  }
  1.1458 +
  1.1459 +  if (is_volatile) {
  1.1460 +    assert(!needs_patching && x->is_loaded(),
  1.1461 +           "how do we know it's volatile if it's not loaded");
  1.1462 +    volatile_field_load(address, reg, info);
  1.1463 +  } else {
  1.1464 +    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
  1.1465 +    __ load(address, reg, info, patch_code);
  1.1466 +  }
  1.1467 +
  1.1468 +  if (is_volatile && os::is_MP()) {
  1.1469 +    __ membar_acquire();
  1.1470 +  }
  1.1471 +}
  1.1472 +
  1.1473 +
  1.1474 +//------------------------java.nio.Buffer.checkIndex------------------------
  1.1475 +
  1.1476 +// int java.nio.Buffer.checkIndex(int)
  1.1477 +void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
  1.1478 +  // NOTE: by the time we are in checkIndex() we are guaranteed that
  1.1479 +  // the buffer is non-null (because checkIndex is package-private and
  1.1480 +  // only called from within other methods of the Buffer classes).
  1.1481 +  assert(x->number_of_arguments() == 2, "wrong type");
  1.1482 +  LIRItem buf  (x->argument_at(0), this);
  1.1483 +  LIRItem index(x->argument_at(1), this);
  1.1484 +  buf.load_item();
  1.1485 +  index.load_item();
  1.1486 +
  1.1487 +  LIR_Opr result = rlock_result(x);
  1.1488 +  if (GenerateRangeChecks) {
  1.1489 +    CodeEmitInfo* info = state_for(x);
  1.1490 +    CodeStub* stub = new RangeCheckStub(info, index.result(), true);
  1.1491 +    if (index.result()->is_constant()) {
  1.1492 +      cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
  1.1493 +      __ branch(lir_cond_belowEqual, T_INT, stub);
  1.1494 +    } else {
  1.1495 +      cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
  1.1496 +                  java_nio_Buffer::limit_offset(), T_INT, info);
  1.1497 +      __ branch(lir_cond_aboveEqual, T_INT, stub);
  1.1498 +    }
  1.1499 +    __ move(index.result(), result);
  1.1500 +  } else {
  1.1501 +    // Just load the index into the result register
  1.1502 +    __ move(index.result(), result);
  1.1503 +  }
  1.1504 +}
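// [Editorial note] lir_cond_aboveEqual/belowEqual are unsigned conditions,
// so the check above also catches a negative index (it compares as a large
// unsigned value); conceptually the emitted test is:
//
//   if ((juint)index >= (juint)buf.limit)  goto RangeCheckStub;  // throws
//   result = index;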
  1.1505 +
  1.1506 +
  1.1507 +//------------------------array access--------------------------------------
  1.1508 +
  1.1509 +
  1.1510 +void LIRGenerator::do_ArrayLength(ArrayLength* x) {
  1.1511 +  LIRItem array(x->array(), this);
  1.1512 +  array.load_item();
  1.1513 +  LIR_Opr reg = rlock_result(x);
  1.1514 +
  1.1515 +  CodeEmitInfo* info = NULL;
  1.1516 +  if (x->needs_null_check()) {
  1.1517 +    NullCheck* nc = x->explicit_null_check();
  1.1518 +    if (nc == NULL) {
  1.1519 +      info = state_for(x);
  1.1520 +    } else {
  1.1521 +      info = state_for(nc);
  1.1522 +    }
  1.1523 +  }
  1.1524 +  __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
  1.1525 +}
  1.1526 +
  1.1527 +
  1.1528 +void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
  1.1529 +  bool use_length = x->length() != NULL;
  1.1530 +  LIRItem array(x->array(), this);
  1.1531 +  LIRItem index(x->index(), this);
  1.1532 +  LIRItem length(this);
  1.1533 +  bool needs_range_check = true;
  1.1534 +
  1.1535 +  if (use_length) {
  1.1536 +    needs_range_check = x->compute_needs_range_check();
  1.1537 +    if (needs_range_check) {
  1.1538 +      length.set_instruction(x->length());
  1.1539 +      length.load_item();
  1.1540 +    }
  1.1541 +  }
  1.1542 +
  1.1543 +  array.load_item();
  1.1544 +  if (index.is_constant() && can_inline_as_constant(x->index())) {
  1.1545 +    // let it be a constant
  1.1546 +    index.dont_load_item();
  1.1547 +  } else {
  1.1548 +    index.load_item();
  1.1549 +  }
  1.1550 +
  1.1551 +  CodeEmitInfo* range_check_info = state_for(x);
  1.1552 +  CodeEmitInfo* null_check_info = NULL;
  1.1553 +  if (x->needs_null_check()) {
  1.1554 +    NullCheck* nc = x->explicit_null_check();
  1.1555 +    if (nc != NULL) {
  1.1556 +      null_check_info = state_for(nc);
  1.1557 +    } else {
  1.1558 +      null_check_info = range_check_info;
  1.1559 +    }
  1.1560 +  }
  1.1561 +
  1.1562 +  // emit array address setup early so it schedules better
  1.1563 +  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
  1.1564 +
  1.1565 +  if (GenerateRangeChecks && needs_range_check) {
  1.1566 +    if (use_length) {
  1.1567 +      // TODO: use a (modified) version of array_range_check that does not require a
  1.1568 +      //       constant length to be loaded to a register
  1.1569 +      __ cmp(lir_cond_belowEqual, length.result(), index.result());
  1.1570 +      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
  1.1571 +    } else {
  1.1572 +      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
  1.1573 +      // The range check performs the null check, so clear it out for the load
  1.1574 +      null_check_info = NULL;
  1.1575 +    }
  1.1576 +  }
  1.1577 +
  1.1578 +  __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
  1.1579 +}
  1.1580 +
  1.1581 +
  1.1582 +void LIRGenerator::do_NullCheck(NullCheck* x) {
  1.1583 +  if (x->can_trap()) {
  1.1584 +    LIRItem value(x->obj(), this);
  1.1585 +    value.load_item();
  1.1586 +    CodeEmitInfo* info = state_for(x);
  1.1587 +    __ null_check(value.result(), info);
  1.1588 +  }
  1.1589 +}
  1.1590 +
  1.1591 +
  1.1592 +void LIRGenerator::do_Throw(Throw* x) {
  1.1593 +  LIRItem exception(x->exception(), this);
  1.1594 +  exception.load_item();
  1.1595 +  set_no_result(x);
  1.1596 +  LIR_Opr exception_opr = exception.result();
  1.1597 +  CodeEmitInfo* info = state_for(x, x->state());
  1.1598 +
  1.1599 +#ifndef PRODUCT
  1.1600 +  if (PrintC1Statistics) {
  1.1601 +    increment_counter(Runtime1::throw_count_address());
  1.1602 +  }
  1.1603 +#endif
  1.1604 +
  1.1605 +  // check if the instruction has an xhandler in any of the nested scopes
  1.1606 +  bool unwind = false;
  1.1607 +  if (info->exception_handlers()->length() == 0) {
  1.1608 +    // this throw is not inside an xhandler
  1.1609 +    unwind = true;
  1.1610 +  } else {
  1.1611 +    // get some idea of the throw type
  1.1612 +    bool type_is_exact = true;
  1.1613 +    ciType* throw_type = x->exception()->exact_type();
  1.1614 +    if (throw_type == NULL) {
  1.1615 +      type_is_exact = false;
  1.1616 +      throw_type = x->exception()->declared_type();
  1.1617 +    }
  1.1618 +    if (throw_type != NULL && throw_type->is_instance_klass()) {
  1.1619 +      ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
  1.1620 +      unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
  1.1621 +    }
  1.1622 +  }
  1.1623 +
  1.1624 +  // do null check before moving exception oop into fixed register
  1.1625 +  // to avoid a fixed interval with an oop during the null check.
  1.1626 +  // Use a copy of the CodeEmitInfo because debug information is
  1.1627 +  // different for null_check and throw.
  1.1628 +  if (GenerateCompilerNullChecks &&
  1.1629 +      (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
  1.1630 +    // if the exception object wasn't created using new then it might be null.
  1.1631 +    __ null_check(exception_opr, new CodeEmitInfo(info, true));
  1.1632 +  }
  1.1633 +
  1.1634 +  if (JvmtiExport::can_post_exceptions() &&
  1.1635 +      !block()->is_set(BlockBegin::default_exception_handler_flag)) {
  1.1636 +    // we need to go through the exception lookup path to get JVMTI
  1.1637 +    // notification done
  1.1638 +    unwind = false;
  1.1639 +  }
  1.1640 +
  1.1641 +  assert(!block()->is_set(BlockBegin::default_exception_handler_flag) || unwind,
  1.1642 +         "should be no more handlers to dispatch to");
  1.1643 +
  1.1644 +  if (DTraceMethodProbes &&
  1.1645 +      block()->is_set(BlockBegin::default_exception_handler_flag)) {
  1.1646 +    // notify that this frame is unwinding
  1.1647 +    BasicTypeList signature;
  1.1648 +    signature.append(T_INT);    // thread
  1.1649 +    signature.append(T_OBJECT); // methodOop
  1.1650 +    LIR_OprList* args = new LIR_OprList();
  1.1651 +    args->append(getThreadPointer());
  1.1652 +    LIR_Opr meth = new_register(T_OBJECT);
  1.1653 +    __ oop2reg(method()->encoding(), meth);
  1.1654 +    args->append(meth);
  1.1655 +    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  1.1656 +  }
  1.1657 +
  1.1658 +  // move exception oop into fixed register
  1.1659 +  __ move(exception_opr, exceptionOopOpr());
  1.1660 +
  1.1661 +  if (unwind) {
  1.1662 +    __ unwind_exception(LIR_OprFact::illegalOpr, exceptionOopOpr(), info);
  1.1663 +  } else {
  1.1664 +    __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
  1.1665 +  }
  1.1666 +}
  1.1667 +
  1.1668 +
  1.1669 +void LIRGenerator::do_RoundFP(RoundFP* x) {
  1.1670 +  LIRItem input(x->input(), this);
  1.1671 +  input.load_item();
  1.1672 +  LIR_Opr input_opr = input.result();
  1.1673 +  assert(input_opr->is_register(), "why round if value is not in a register?");
  1.1674 +  assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
  1.1675 +  if (input_opr->is_single_fpu()) {
  1.1676 +    set_result(x, round_item(input_opr)); // This code path not currently taken
  1.1677 +  } else {
  1.1678 +    LIR_Opr result = new_register(T_DOUBLE);
  1.1679 +    set_vreg_flag(result, must_start_in_memory);
  1.1680 +    __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
  1.1681 +    set_result(x, result);
  1.1682 +  }
  1.1683 +}
  1.1684 +
  1.1685 +void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
  1.1686 +  LIRItem base(x->base(), this);
  1.1687 +  LIRItem idx(this);
  1.1688 +
  1.1689 +  base.load_item();
  1.1690 +  if (x->has_index()) {
  1.1691 +    idx.set_instruction(x->index());
  1.1692 +    idx.load_nonconstant();
  1.1693 +  }
  1.1694 +
  1.1695 +  LIR_Opr reg = rlock_result(x, x->basic_type());
  1.1696 +
  1.1697 +  int   log2_scale = 0;
  1.1698 +  if (x->has_index()) {
  1.1699 +    assert(x->index()->type()->tag() == intTag, "should not find non-int index");
  1.1700 +    log2_scale = x->log2_scale();
  1.1701 +  }
  1.1702 +
  1.1703 +  assert(!x->has_index() || idx.value() == x->index(), "should match");
  1.1704 +
  1.1705 +  LIR_Opr base_op = base.result();
  1.1706 +#ifndef _LP64
  1.1707 +  if (x->base()->type()->tag() == longTag) {
  1.1708 +    base_op = new_register(T_INT);
  1.1709 +    __ convert(Bytecodes::_l2i, base.result(), base_op);
  1.1710 +  } else {
  1.1711 +    assert(x->base()->type()->tag() == intTag, "must be");
  1.1712 +  }
  1.1713 +#endif
  1.1714 +
  1.1715 +  BasicType dst_type = x->basic_type();
  1.1716 +  LIR_Opr index_op = idx.result();
  1.1717 +
  1.1718 +  LIR_Address* addr;
  1.1719 +  if (index_op->is_constant()) {
  1.1720 +    assert(log2_scale == 0, "must not have a scale");
  1.1721 +    addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
  1.1722 +  } else {
  1.1723 +#ifdef IA32
  1.1724 +    addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
  1.1725 +#else
  1.1726 +    if (index_op->is_illegal() || log2_scale == 0) {
  1.1727 +      addr = new LIR_Address(base_op, index_op, dst_type);
  1.1728 +    } else {
  1.1729 +      LIR_Opr tmp = new_register(T_INT);
  1.1730 +      __ shift_left(index_op, log2_scale, tmp);
  1.1731 +      addr = new LIR_Address(base_op, tmp, dst_type);
  1.1732 +    }
  1.1733 +#endif
  1.1734 +  }
  1.1735 +
  1.1736 +  if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
  1.1737 +    __ unaligned_move(addr, reg);
  1.1738 +  } else {
  1.1739 +    __ move(addr, reg);
  1.1740 +  }
  1.1741 +}
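// [Editorial sketch] For a raw access such as unsafe.getLong(base, index)
// with log2_scale == 3, the address formed above is
//
//   effective_address = base + (index << 3);
//
// On IA32 the scaled index is folded into the addressing mode; on other
// platforms an explicit shift_left into a temporary register is emitted.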
  1.1742 +
  1.1743 +
  1.1744 +void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
  1.1745 +  int  log2_scale = 0;
  1.1746 +  BasicType type = x->basic_type();
  1.1747 +
  1.1748 +  if (x->has_index()) {
  1.1749 +    assert(x->index()->type()->tag() == intTag, "should not find non-int index");
  1.1750 +    log2_scale = x->log2_scale();
  1.1751 +  }
  1.1752 +
  1.1753 +  LIRItem base(x->base(), this);
  1.1754 +  LIRItem value(x->value(), this);
  1.1755 +  LIRItem idx(this);
  1.1756 +
  1.1757 +  base.load_item();
  1.1758 +  if (x->has_index()) {
  1.1759 +    idx.set_instruction(x->index());
  1.1760 +    idx.load_item();
  1.1761 +  }
  1.1762 +
  1.1763 +  if (type == T_BYTE || type == T_BOOLEAN) {
  1.1764 +    value.load_byte_item();
  1.1765 +  } else {
  1.1766 +    value.load_item();
  1.1767 +  }
  1.1768 +
  1.1769 +  set_no_result(x);
  1.1770 +
  1.1771 +  LIR_Opr base_op = base.result();
  1.1772 +#ifndef _LP64
  1.1773 +  if (x->base()->type()->tag() == longTag) {
  1.1774 +    base_op = new_register(T_INT);
  1.1775 +    __ convert(Bytecodes::_l2i, base.result(), base_op);
  1.1776 +  } else {
  1.1777 +    assert(x->base()->type()->tag() == intTag, "must be");
  1.1778 +  }
  1.1779 +#endif
  1.1780 +
  1.1781 +  LIR_Opr index_op = idx.result();
  1.1782 +  if (log2_scale != 0) {
  1.1783 +    // temporary fix (platform-dependent code that avoids the explicit shift on Intel would be better)
  1.1784 +    index_op = new_register(T_INT);
  1.1785 +    __ move(idx.result(), index_op);
  1.1786 +    __ shift_left(index_op, log2_scale, index_op);
  1.1787 +  }
  1.1788 +
  1.1789 +  LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
  1.1790 +  __ move(value.result(), addr);
  1.1791 +}
  1.1792 +
  1.1793 +
  1.1794 +void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
  1.1795 +  BasicType type = x->basic_type();
  1.1796 +  LIRItem src(x->object(), this);
  1.1797 +  LIRItem off(x->offset(), this);
  1.1798 +
  1.1799 +  off.load_item();
  1.1800 +  src.load_item();
  1.1801 +
  1.1802 +  LIR_Opr reg = rlock_result(x, type);
  1.1803 +
  1.1804 +  if (x->is_volatile() && os::is_MP()) __ membar_acquire();
  1.1805 +  get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
  1.1806 +  if (x->is_volatile() && os::is_MP()) __ membar();
  1.1807 +}
  1.1808 +
  1.1809 +
  1.1810 +void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
  1.1811 +  BasicType type = x->basic_type();
  1.1812 +  LIRItem src(x->object(), this);
  1.1813 +  LIRItem off(x->offset(), this);
  1.1814 +  LIRItem data(x->value(), this);
  1.1815 +
  1.1816 +  src.load_item();
  1.1817 +  if (type == T_BOOLEAN || type == T_BYTE) {
  1.1818 +    data.load_byte_item();
  1.1819 +  } else {
  1.1820 +    data.load_item();
  1.1821 +  }
  1.1822 +  off.load_item();
  1.1823 +
  1.1824 +  set_no_result(x);
  1.1825 +
  1.1826 +  if (x->is_volatile() && os::is_MP()) __ membar_release();
  1.1827 +  put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
  1.1828 +}
  1.1829 +
  1.1830 +
  1.1831 +void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
  1.1832 +  LIRItem src(x->object(), this);
  1.1833 +  LIRItem off(x->offset(), this);
  1.1834 +
  1.1835 +  src.load_item();
  1.1836 +  if (off.is_constant() && can_inline_as_constant(x->offset())) {
  1.1837 +    // let it be a constant
  1.1838 +    off.dont_load_item();
  1.1839 +  } else {
  1.1840 +    off.load_item();
  1.1841 +  }
  1.1842 +
  1.1843 +  set_no_result(x);
  1.1844 +
  1.1845 +  LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
  1.1846 +  __ prefetch(addr, is_store);
  1.1847 +}
  1.1848 +
  1.1849 +
  1.1850 +void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
  1.1851 +  do_UnsafePrefetch(x, false);
  1.1852 +}
  1.1853 +
  1.1854 +
  1.1855 +void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
  1.1856 +  do_UnsafePrefetch(x, true);
  1.1857 +}
  1.1858 +
  1.1859 +
  1.1860 +void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
  1.1861 +  int lng = x->length();
  1.1862 +
  1.1863 +  for (int i = 0; i < lng; i++) {
  1.1864 +    SwitchRange* one_range = x->at(i);
  1.1865 +    int low_key = one_range->low_key();
  1.1866 +    int high_key = one_range->high_key();
  1.1867 +    BlockBegin* dest = one_range->sux();
  1.1868 +    if (low_key == high_key) {
  1.1869 +      __ cmp(lir_cond_equal, value, low_key);
  1.1870 +      __ branch(lir_cond_equal, T_INT, dest);
  1.1871 +    } else if (high_key - low_key == 1) {
  1.1872 +      __ cmp(lir_cond_equal, value, low_key);
  1.1873 +      __ branch(lir_cond_equal, T_INT, dest);
  1.1874 +      __ cmp(lir_cond_equal, value, high_key);
  1.1875 +      __ branch(lir_cond_equal, T_INT, dest);
  1.1876 +    } else {
  1.1877 +      LabelObj* L = new LabelObj();
  1.1878 +      __ cmp(lir_cond_less, value, low_key);
  1.1879 +      __ branch(lir_cond_less, L->label());
  1.1880 +      __ cmp(lir_cond_lessEqual, value, high_key);
  1.1881 +      __ branch(lir_cond_lessEqual, T_INT, dest);
  1.1882 +      __ branch_destination(L->label());
  1.1883 +    }
  1.1884 +  }
  1.1885 +  __ jump(default_sux);
  1.1886 +}
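// [Editorial example] A range [5..8] -> dest emitted by the loop above
// expands to the following pseudo-LIR:
//
//   cmp    less, value, 5
//   branch less, L            // below the range: fall through to next test
//   cmp    lessEqual, value, 8
//   branch lessEqual, dest    // 5 <= value <= 8
//   L:
//
// Singleton and two-key ranges use plain equality compares instead.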
  1.1887 +
  1.1888 +
  1.1889 +SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
  1.1890 +  SwitchRangeList* res = new SwitchRangeList();
  1.1891 +  int len = x->length();
  1.1892 +  if (len > 0) {
  1.1893 +    BlockBegin* sux = x->sux_at(0);
  1.1894 +    int key = x->lo_key();
  1.1895 +    BlockBegin* default_sux = x->default_sux();
  1.1896 +    SwitchRange* range = new SwitchRange(key, sux);
  1.1897 +    for (int i = 0; i < len; i++, key++) {
  1.1898 +      BlockBegin* new_sux = x->sux_at(i);
  1.1899 +      if (sux == new_sux) {
  1.1900 +        // still in same range
  1.1901 +        range->set_high_key(key);
  1.1902 +      } else {
  1.1903 +        // skip tests which explicitly dispatch to the default
  1.1904 +        if (sux != default_sux) {
  1.1905 +          res->append(range);
  1.1906 +        }
  1.1907 +        range = new SwitchRange(key, new_sux);
  1.1908 +      }
  1.1909 +      sux = new_sux;
  1.1910 +    }
  1.1911 +    if (res->length() == 0 || res->last() != range)  res->append(range);
  1.1912 +  }
  1.1913 +  return res;
  1.1914 +}
  1.1915 +
  1.1916 +
  1.1917 +// we expect the keys to be sorted by increasing value
  1.1918 +SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
  1.1919 +  SwitchRangeList* res = new SwitchRangeList();
  1.1920 +  int len = x->length();
  1.1921 +  if (len > 0) {
  1.1922 +    BlockBegin* default_sux = x->default_sux();
  1.1923 +    int key = x->key_at(0);
  1.1924 +    BlockBegin* sux = x->sux_at(0);
  1.1925 +    SwitchRange* range = new SwitchRange(key, sux);
  1.1926 +    for (int i = 1; i < len; i++) {
  1.1927 +      int new_key = x->key_at(i);
  1.1928 +      BlockBegin* new_sux = x->sux_at(i);
  1.1929 +      if (key+1 == new_key && sux == new_sux) {
  1.1930 +        // still in same range
  1.1931 +        range->set_high_key(new_key);
  1.1932 +      } else {
  1.1933 +        // skip tests which explicitly dispatch to the default
  1.1934 +        if (range->sux() != default_sux) {
  1.1935 +          res->append(range);
  1.1936 +        }
  1.1937 +        range = new SwitchRange(new_key, new_sux);
  1.1938 +      }
  1.1939 +      key = new_key;
  1.1940 +      sux = new_sux;
  1.1941 +    }
  1.1942 +    if (res->length() == 0 || res->last() != range)  res->append(range);
  1.1943 +  }
  1.1944 +  return res;
  1.1945 +}
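// [Editorial example] For a lookupswitch with keys {0, 1, 2, 10} where keys
// 0..2 share successor B1 and key 10 goes to B2, the loop above merges the
// consecutive keys into two ranges: [0..2] -> B1 and [10..10] -> B2.
// Interior ranges that dispatch to the default successor are dropped, since
// do_SwitchRanges ends with an unconditional jump to default_sux anyway.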
  1.1946 +
  1.1947 +
  1.1948 +void LIRGenerator::do_TableSwitch(TableSwitch* x) {
  1.1949 +  LIRItem tag(x->tag(), this);
  1.1950 +  tag.load_item();
  1.1951 +  set_no_result(x);
  1.1952 +
  1.1953 +  if (x->is_safepoint()) {
  1.1954 +    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  1.1955 +  }
  1.1956 +
  1.1957 +  // move values into phi locations
  1.1958 +  move_to_phi(x->state());
  1.1959 +
  1.1960 +  int lo_key = x->lo_key();
  1.1961 +  int hi_key = x->hi_key();
  1.1962 +  int len = x->length();
  1.1963 +  CodeEmitInfo* info = state_for(x, x->state());
  1.1964 +  LIR_Opr value = tag.result();
  1.1965 +  if (UseTableRanges) {
  1.1966 +    do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
  1.1967 +  } else {
  1.1968 +    for (int i = 0; i < len; i++) {
  1.1969 +      __ cmp(lir_cond_equal, value, i + lo_key);
  1.1970 +      __ branch(lir_cond_equal, T_INT, x->sux_at(i));
  1.1971 +    }
  1.1972 +    __ jump(x->default_sux());
  1.1973 +  }
  1.1974 +}
  1.1975 +
  1.1976 +
  1.1977 +void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
  1.1978 +  LIRItem tag(x->tag(), this);
  1.1979 +  tag.load_item();
  1.1980 +  set_no_result(x);
  1.1981 +
  1.1982 +  if (x->is_safepoint()) {
  1.1983 +    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  1.1984 +  }
  1.1985 +
  1.1986 +  // move values into phi locations
  1.1987 +  move_to_phi(x->state());
  1.1988 +
  1.1989 +  LIR_Opr value = tag.result();
  1.1990 +  if (UseTableRanges) {
  1.1991 +    do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
  1.1992 +  } else {
  1.1993 +    int len = x->length();
  1.1994 +    for (int i = 0; i < len; i++) {
  1.1995 +      __ cmp(lir_cond_equal, value, x->key_at(i));
  1.1996 +      __ branch(lir_cond_equal, T_INT, x->sux_at(i));
  1.1997 +    }
  1.1998 +    __ jump(x->default_sux());
  1.1999 +  }
  1.2000 +}
  1.2001 +
  1.2002 +
  1.2003 +void LIRGenerator::do_Goto(Goto* x) {
  1.2004 +  set_no_result(x);
  1.2005 +
  1.2006 +  if (block()->next()->as_OsrEntry()) {
  1.2007 +    // need to free up storage used for OSR entry point
  1.2008 +    LIR_Opr osrBuffer = block()->next()->operand();
  1.2009 +    BasicTypeList signature;
  1.2010 +    signature.append(T_INT);
  1.2011 +    CallingConvention* cc = frame_map()->c_calling_convention(&signature);
  1.2012 +    __ move(osrBuffer, cc->args()->at(0));
  1.2013 +    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
  1.2014 +                         getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
  1.2015 +  }
  1.2016 +
  1.2017 +  if (x->is_safepoint()) {
  1.2018 +    ValueStack* state = x->state_before() ? x->state_before() : x->state();
  1.2019 +
  1.2020 +    // increment backedge counter if needed
  1.2021 +    increment_backedge_counter(state_for(x, state));
  1.2022 +
  1.2023 +    CodeEmitInfo* safepoint_info = state_for(x, state);
  1.2024 +    __ safepoint(safepoint_poll_register(), safepoint_info);
  1.2025 +  }
  1.2026 +
  1.2027 +  // Emit the phi-instruction moves after the safepoint since this
  1.2028 +  // simplifies describing the state at the safepoint.
  1.2029 +  move_to_phi(x->state());
  1.2030 +
  1.2031 +  __ jump(x->default_sux());
  1.2032 +}
  1.2033 +
  1.2034 +
  1.2035 +void LIRGenerator::do_Base(Base* x) {
  1.2036 +  __ std_entry(LIR_OprFact::illegalOpr);
  1.2037 +  // Emit moves from physical registers / stack slots to virtual registers
  1.2038 +  CallingConvention* args = compilation()->frame_map()->incoming_arguments();
  1.2039 +  IRScope* irScope = compilation()->hir()->top_scope();
  1.2040 +  int java_index = 0;
  1.2041 +  for (int i = 0; i < args->length(); i++) {
  1.2042 +    LIR_Opr src = args->at(i);
  1.2043 +    assert(!src->is_illegal(), "check");
  1.2044 +    BasicType t = src->type();
  1.2045 +
  1.2046 +    // Types which are smaller than int are passed as int, so
  1.2047 +    // correct the type that was passed.
  1.2048 +    switch (t) {
  1.2049 +    case T_BYTE:
  1.2050 +    case T_BOOLEAN:
  1.2051 +    case T_SHORT:
  1.2052 +    case T_CHAR:
  1.2053 +      t = T_INT;
  1.2054 +      break;
  1.2055 +    }
  1.2056 +
  1.2057 +    LIR_Opr dest = new_register(t);
  1.2058 +    __ move(src, dest);
  1.2059 +
  1.2060 +    // Assign new location to Local instruction for this local
  1.2061 +    Local* local = x->state()->local_at(java_index)->as_Local();
  1.2062 +    assert(local != NULL, "Locals for incoming arguments must have been created");
  1.2063 +    assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
  1.2064 +    local->set_operand(dest);
  1.2065 +    _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
  1.2066 +    java_index += type2size[t];
  1.2067 +  }
  1.2068 +
  1.2069 +  if (DTraceMethodProbes) {
  1.2070 +    BasicTypeList signature;
  1.2071 +    signature.append(T_INT);    // thread
  1.2072 +    signature.append(T_OBJECT); // methodOop
  1.2073 +    LIR_OprList* args = new LIR_OprList();
  1.2074 +    args->append(getThreadPointer());
  1.2075 +    LIR_Opr meth = new_register(T_OBJECT);
  1.2076 +    __ oop2reg(method()->encoding(), meth);
  1.2077 +    args->append(meth);
  1.2078 +    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
  1.2079 +  }
  1.2080 +
  1.2081 +  if (method()->is_synchronized()) {
  1.2082 +    LIR_Opr obj;
  1.2083 +    if (method()->is_static()) {
  1.2084 +      obj = new_register(T_OBJECT);
  1.2085 +      __ oop2reg(method()->holder()->java_mirror()->encoding(), obj);
  1.2086 +    } else {
  1.2087 +      Local* receiver = x->state()->local_at(0)->as_Local();
  1.2088 +      assert(receiver != NULL, "must already exist");
  1.2089 +      obj = receiver->operand();
  1.2090 +    }
  1.2091 +    assert(obj->is_valid(), "must be valid");
  1.2092 +
  1.2093 +    if (GenerateSynchronizationCode) {  // is_synchronized() already checked above
  1.2094 +      LIR_Opr lock = new_register(T_INT);
  1.2095 +      __ load_stack_address_monitor(0, lock);
  1.2096 +
  1.2097 +      CodeEmitInfo* info = new CodeEmitInfo(SynchronizationEntryBCI, scope()->start()->state(), NULL);
  1.2098 +      CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
  1.2099 +
  1.2100 +      // receiver is guaranteed non-NULL so don't need CodeEmitInfo
  1.2101 +      __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
  1.2102 +    }
  1.2103 +  }
  1.2104 +
  1.2105 +  // increment invocation counters if needed
  1.2106 +  increment_invocation_counter(new CodeEmitInfo(0, scope()->start()->state(), NULL));
  1.2107 +
  1.2108 +  // all blocks with a successor must end with an unconditional jump
  1.2109 +  // to the successor even if they are consecutive
  1.2110 +  __ jump(x->default_sux());
  1.2111 +}
  1.2112 +
  1.2113 +
  1.2114 +void LIRGenerator::do_OsrEntry(OsrEntry* x) {
  1.2115 +  // construct our frame and model the production of incoming pointer
  1.2116 +  // to the OSR buffer.
  1.2117 +  __ osr_entry(LIR_Assembler::osrBufferPointer());
  1.2118 +  LIR_Opr result = rlock_result(x);
  1.2119 +  __ move(LIR_Assembler::osrBufferPointer(), result);
  1.2120 +}
  1.2121 +
  1.2122 +
  1.2123 +void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
  1.2124 +  int i = x->has_receiver() ? 1 : 0;
  1.2125 +  for (; i < args->length(); i++) {
  1.2126 +    LIRItem* param = args->at(i);
  1.2127 +    LIR_Opr loc = arg_list->at(i);
  1.2128 +    if (loc->is_register()) {
  1.2129 +      param->load_item_force(loc);
  1.2130 +    } else {
  1.2131 +      LIR_Address* addr = loc->as_address_ptr();
  1.2132 +      param->load_for_store(addr->type());
  1.2133 +      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
  1.2134 +        __ unaligned_move(param->result(), addr);
  1.2135 +      } else {
  1.2136 +        __ move(param->result(), addr);
  1.2137 +      }
  1.2138 +    }
  1.2139 +  }
  1.2140 +
  1.2141 +  if (x->has_receiver()) {
  1.2142 +    LIRItem* receiver = args->at(0);
  1.2143 +    LIR_Opr loc = arg_list->at(0);
  1.2144 +    if (loc->is_register()) {
  1.2145 +      receiver->load_item_force(loc);
  1.2146 +    } else {
  1.2147 +      assert(loc->is_address(), "just checking");
  1.2148 +      receiver->load_for_store(T_OBJECT);
  1.2149 +      __ move(receiver->result(), loc);
  1.2150 +    }
  1.2151 +  }
  1.2152 +}
  1.2153 +
  1.2154 +
  1.2155 +// Visits all arguments, returns appropriate items without loading them
  1.2156 +LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
  1.2157 +  LIRItemList* argument_items = new LIRItemList();
  1.2158 +  if (x->has_receiver()) {
  1.2159 +    LIRItem* receiver = new LIRItem(x->receiver(), this);
  1.2160 +    argument_items->append(receiver);
  1.2161 +  }
  1.2162 +  int idx = x->has_receiver() ? 1 : 0;
  1.2163 +  for (int i = 0; i < x->number_of_arguments(); i++) {
  1.2164 +    LIRItem* param = new LIRItem(x->argument_at(i), this);
  1.2165 +    argument_items->append(param);
  1.2166 +    idx += (param->type()->is_double_word() ? 2 : 1);
  1.2167 +  }
  1.2168 +  return argument_items;
  1.2169 +}
  1.2170 +
  1.2171 +
  1.2172 +// The invoke with receiver has the following phases:
  1.2173 +//   a) traverse and load/lock receiver;
  1.2174 +//   b) traverse all arguments -> item-array (invoke_visit_argument)
  1.2175 +//   c) push receiver on stack
  1.2176 +//   d) load each of the items and push on stack
  1.2177 +//   e) unlock receiver
  1.2178 +//   f) move receiver into receiver-register %o0
  1.2179 +//   g) lock result registers and emit call operation
  1.2180 +//
  1.2181 +// Before issuing a call, we must spill-save all values on stack
  1.2182 +// that are in caller-save registers. "spill-save" moves those registers
  1.2183 +// either into a free callee-save register or spills them if no free
  1.2184 +// callee-save register is available.
  1.2185 +//
  1.2186 +// The problem is where to invoke spill-save.
  1.2187 +// - if invoked between e) and f), we may lock a callee-save
  1.2188 +//   register in "spill-save" that destroys the receiver register
  1.2189 +//   before f) is executed
  1.2190 +// - if we rearrange f) to be earlier, by loading %o0, it
  1.2191 +//   may destroy a value on the stack that is currently in %o0
  1.2192 +//   and is waiting to be spilled
  1.2193 +// - if we keep the receiver locked while doing spill-save,
  1.2194 +//   we cannot spill it as it is spill-locked
  1.2195 +//
  1.2196 +void LIRGenerator::do_Invoke(Invoke* x) {
  1.2197 +  CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
  1.2198 +
  1.2199 +  LIR_OprList* arg_list = cc->args();
  1.2200 +  LIRItemList* args = invoke_visit_arguments(x);
  1.2201 +  LIR_Opr receiver = LIR_OprFact::illegalOpr;
  1.2202 +
  1.2203 +  // setup result register
  1.2204 +  LIR_Opr result_register = LIR_OprFact::illegalOpr;
  1.2205 +  if (x->type() != voidType) {
  1.2206 +    result_register = result_register_for(x->type());
  1.2207 +  }
  1.2208 +
  1.2209 +  CodeEmitInfo* info = state_for(x, x->state());
  1.2210 +
  1.2211 +  invoke_load_arguments(x, args, arg_list);
  1.2212 +
  1.2213 +  if (x->has_receiver()) {
  1.2214 +    args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
  1.2215 +    receiver = args->at(0)->result();
  1.2216 +  }
  1.2217 +
  1.2218 +  // emit invoke code
  1.2219 +  bool optimized = x->target_is_loaded() && x->target_is_final();
  1.2220 +  assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
  1.2221 +
  1.2222 +  switch (x->code()) {
  1.2223 +    case Bytecodes::_invokestatic:
  1.2224 +      __ call_static(x->target(), result_register,
  1.2225 +                     SharedRuntime::get_resolve_static_call_stub(),
  1.2226 +                     arg_list, info);
  1.2227 +      break;
  1.2228 +    case Bytecodes::_invokespecial:
  1.2229 +    case Bytecodes::_invokevirtual:
  1.2230 +    case Bytecodes::_invokeinterface:
  1.2231 +      // for a final target we still produce an inline cache, in order
  1.2232 +      // to be able to call in mixed mode
  1.2233 +      if (x->code() == Bytecodes::_invokespecial || optimized) {
  1.2234 +        __ call_opt_virtual(x->target(), receiver, result_register,
  1.2235 +                            SharedRuntime::get_resolve_opt_virtual_call_stub(),
  1.2236 +                            arg_list, info);
  1.2237 +      } else if (x->vtable_index() < 0) {
  1.2238 +        __ call_icvirtual(x->target(), receiver, result_register,
  1.2239 +                          SharedRuntime::get_resolve_virtual_call_stub(),
  1.2240 +                          arg_list, info);
  1.2241 +      } else {
  1.2242 +        int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
  1.2243 +        int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
  1.2244 +        __ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
  1.2245 +      }
  1.2246 +      break;
  1.2247 +    default:
  1.2248 +      ShouldNotReachHere();
  1.2249 +      break;
  1.2250 +  }
  1.2251 +
  1.2252 +  if (x->type()->is_float() || x->type()->is_double()) {
  1.2253 +    // Force rounding of results from non-strictfp when in strictfp
  1.2254 +    // scope (or when we don't know the strictness of the callee, to
  1.2255 +    // be safe).
  1.2256 +    if (method()->is_strict()) {
  1.2257 +      if (!x->target_is_loaded() || !x->target_is_strictfp()) {
  1.2258 +        result_register = round_item(result_register);
  1.2259 +      }
  1.2260 +    }
  1.2261 +  }
  1.2262 +
  1.2263 +  if (result_register->is_valid()) {
  1.2264 +    LIR_Opr result = rlock_result(x);
  1.2265 +    __ move(result_register, result);
  1.2266 +  }
  1.2267 +}
  1.2268 +
  1.2269 +
  1.2270 +void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
  1.2271 +  assert(x->number_of_arguments() == 1, "wrong type");
  1.2272 +  LIRItem value       (x->argument_at(0), this);
  1.2273 +  LIR_Opr reg = rlock_result(x);
  1.2274 +  value.load_item();
  1.2275 +  LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
  1.2276 +  __ move(tmp, reg);
  1.2277 +}
  1.2278 +
  1.2279 +
  1.2280 +
  1.2281 +// Code for: x->x() {x->cond()} x->y() ? x->tval() : x->fval()
  1.2282 +void LIRGenerator::do_IfOp(IfOp* x) {
  1.2283 +#ifdef ASSERT
  1.2284 +  {
  1.2285 +    ValueTag xtag = x->x()->type()->tag();
  1.2286 +    ValueTag ttag = x->tval()->type()->tag();
  1.2287 +    assert(xtag == intTag || xtag == objectTag, "cannot handle others");
  1.2288 +    assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
  1.2289 +    assert(ttag == x->fval()->type()->tag(), "cannot handle others");
  1.2290 +  }
  1.2291 +#endif
  1.2292 +
  1.2293 +  LIRItem left(x->x(), this);
  1.2294 +  LIRItem right(x->y(), this);
  1.2295 +  left.load_item();
  1.2296 +  if (can_inline_as_constant(right.value())) {
  1.2297 +    right.dont_load_item();
  1.2298 +  } else {
  1.2299 +    right.load_item();
  1.2300 +  }
  1.2301 +
  1.2302 +  LIRItem t_val(x->tval(), this);
  1.2303 +  LIRItem f_val(x->fval(), this);
  1.2304 +  t_val.dont_load_item();
  1.2305 +  f_val.dont_load_item();
  1.2306 +  LIR_Opr reg = rlock_result(x);
  1.2307 +
  1.2308 +  __ cmp(lir_cond(x->cond()), left.result(), right.result());
  1.2309 +  __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg);
  1.2310 +}
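// [Editorial example] For Java source  z = (a < b) ? x : y  the two ops
// above come out as
//
//   cmp   less, a, b
//   cmove less, x, y, z    // z = (a < b) ? x : y, no branch needed
//
// i.e. IfOp compiles to a conditional move rather than to control flow.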
  1.2311 +
  1.2312 +
  1.2313 +void LIRGenerator::do_Intrinsic(Intrinsic* x) {
  1.2314 +  switch (x->id()) {
  1.2315 +  case vmIntrinsics::_intBitsToFloat      :
  1.2316 +  case vmIntrinsics::_doubleToRawLongBits :
  1.2317 +  case vmIntrinsics::_longBitsToDouble    :
  1.2318 +  case vmIntrinsics::_floatToRawIntBits   : {
  1.2319 +    do_FPIntrinsics(x);
  1.2320 +    break;
  1.2321 +  }
  1.2322 +
  1.2323 +  case vmIntrinsics::_currentTimeMillis: {
  1.2324 +    assert(x->number_of_arguments() == 0, "wrong type");
  1.2325 +    LIR_Opr reg = result_register_for(x->type());
  1.2326 +    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeMillis), getThreadTemp(),
  1.2327 +                         reg, new LIR_OprList());
  1.2328 +    LIR_Opr result = rlock_result(x);
  1.2329 +    __ move(reg, result);
  1.2330 +    break;
  1.2331 +  }
  1.2332 +
  1.2333 +  case vmIntrinsics::_nanoTime: {
  1.2334 +    assert(x->number_of_arguments() == 0, "wrong type");
  1.2335 +    LIR_Opr reg = result_register_for(x->type());
  1.2336 +    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeNanos), getThreadTemp(),
  1.2337 +                         reg, new LIR_OprList());
  1.2338 +    LIR_Opr result = rlock_result(x);
  1.2339 +    __ move(reg, result);
  1.2340 +    break;
  1.2341 +  }
  1.2342 +
  1.2343 +  case vmIntrinsics::_Object_init:    do_RegisterFinalizer(x); break;
  1.2344 +  case vmIntrinsics::_getClass:       do_getClass(x);      break;
  1.2345 +  case vmIntrinsics::_currentThread:  do_currentThread(x); break;
  1.2346 +
  1.2347 +  case vmIntrinsics::_dlog:           // fall through
  1.2348 +  case vmIntrinsics::_dlog10:         // fall through
  1.2349 +  case vmIntrinsics::_dabs:           // fall through
  1.2350 +  case vmIntrinsics::_dsqrt:          // fall through
  1.2351 +  case vmIntrinsics::_dtan:           // fall through
  1.2352 +  case vmIntrinsics::_dsin :          // fall through
  1.2353 +  case vmIntrinsics::_dcos :          do_MathIntrinsic(x); break;
  1.2354 +  case vmIntrinsics::_arraycopy:      do_ArrayCopy(x);     break;
  1.2355 +
  1.2356 +  // java.nio.Buffer.checkIndex
  1.2357 +  case vmIntrinsics::_checkIndex:     do_NIOCheckIndex(x); break;
  1.2358 +
  1.2359 +  case vmIntrinsics::_compareAndSwapObject:
  1.2360 +    do_CompareAndSwap(x, objectType);
  1.2361 +    break;
  1.2362 +  case vmIntrinsics::_compareAndSwapInt:
  1.2363 +    do_CompareAndSwap(x, intType);
  1.2364 +    break;
  1.2365 +  case vmIntrinsics::_compareAndSwapLong:
  1.2366 +    do_CompareAndSwap(x, longType);
  1.2367 +    break;
  1.2368 +
  1.2369 +    // sun.misc.AtomicLongCSImpl.attemptUpdate
  1.2370 +  case vmIntrinsics::_attemptUpdate:
  1.2371 +    do_AttemptUpdate(x);
  1.2372 +    break;
  1.2373 +
  1.2374 +  default: ShouldNotReachHere(); break;
  1.2375 +  }
  1.2376 +}
  1.2377 +
  1.2378 +
  1.2379 +void LIRGenerator::do_ProfileCall(ProfileCall* x) {
  1.2380 +  // Need recv in a temporary register so it interferes with the other temporaries
  1.2381 +  LIR_Opr recv = LIR_OprFact::illegalOpr;
  1.2382 +  LIR_Opr mdo = new_register(T_OBJECT);
  1.2383 +  LIR_Opr tmp = new_register(T_INT);
  1.2384 +  if (x->recv() != NULL) {
  1.2385 +    LIRItem value(x->recv(), this);
  1.2386 +    value.load_item();
  1.2387 +    recv = new_register(T_OBJECT);
  1.2388 +    __ move(value.result(), recv);
  1.2389 +  }
  1.2390 +  __ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder());
  1.2391 +}
  1.2392 +
  1.2393 +
  1.2394 +void LIRGenerator::do_ProfileCounter(ProfileCounter* x) {
  1.2395 +  LIRItem mdo(x->mdo(), this);
  1.2396 +  mdo.load_item();
  1.2397 +
  1.2398 +  increment_counter(new LIR_Address(mdo.result(), x->offset(), T_INT), x->increment());
  1.2399 +}
  1.2400 +
  1.2401 +
  1.2402 +LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
  1.2403 +  LIRItemList args(1);
  1.2404 +  LIRItem value(arg1, this);
  1.2405 +  args.append(&value);
  1.2406 +  BasicTypeList signature;
  1.2407 +  signature.append(as_BasicType(arg1->type()));
  1.2408 +
  1.2409 +  return call_runtime(&signature, &args, entry, result_type, info);
  1.2410 +}
  1.2411 +
  1.2412 +
  1.2413 +LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
  1.2414 +  LIRItemList args(2);
  1.2415 +  LIRItem value1(arg1, this);
  1.2416 +  LIRItem value2(arg2, this);
  1.2417 +  args.append(&value1);
  1.2418 +  args.append(&value2);
  1.2419 +  BasicTypeList signature;
  1.2420 +  signature.append(as_BasicType(arg1->type()));
  1.2421 +  signature.append(as_BasicType(arg2->type()));
  1.2422 +
  1.2423 +  return call_runtime(&signature, &args, entry, result_type, info);
  1.2424 +}
  1.2425 +
  1.2426 +
  1.2427 +LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
  1.2428 +                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  1.2429 +  // get a result register
  1.2430 +  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  1.2431 +  LIR_Opr result = LIR_OprFact::illegalOpr;
  1.2432 +  if (result_type->tag() != voidTag) {
  1.2433 +    result = new_register(result_type);
  1.2434 +    phys_reg = result_register_for(result_type);
  1.2435 +  }
  1.2436 +
  1.2437 +  // move the arguments into the correct location
  1.2438 +  CallingConvention* cc = frame_map()->c_calling_convention(signature);
  1.2439 +  assert(cc->length() == args->length(), "argument mismatch");
  1.2440 +  for (int i = 0; i < args->length(); i++) {
  1.2441 +    LIR_Opr arg = args->at(i);
  1.2442 +    LIR_Opr loc = cc->at(i);
  1.2443 +    if (loc->is_register()) {
  1.2444 +      __ move(arg, loc);
  1.2445 +    } else {
  1.2446 +      LIR_Address* addr = loc->as_address_ptr();
  1.2452 +      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
  1.2453 +        __ unaligned_move(arg, addr);
  1.2454 +      } else {
  1.2455 +        __ move(arg, addr);
  1.2456 +      }
  1.2457 +    }
  1.2458 +  }
  1.2459 +
  1.2460 +  if (info) {
  1.2461 +    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  1.2462 +  } else {
  1.2463 +    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  1.2464 +  }
  1.2465 +  if (result->is_valid()) {
  1.2466 +    __ move(phys_reg, result);
  1.2467 +  }
  1.2468 +  return result;
  1.2469 +}
  1.2470 +
  1.2471 +
  1.2472 +LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
  1.2473 +                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  1.2474 +  // get a result register
  1.2475 +  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  1.2476 +  LIR_Opr result = LIR_OprFact::illegalOpr;
  1.2477 +  if (result_type->tag() != voidTag) {
  1.2478 +    result = new_register(result_type);
  1.2479 +    phys_reg = result_register_for(result_type);
  1.2480 +  }
  1.2481 +
  1.2482 +  // move the arguments into the correct location
  1.2483 +  CallingConvention* cc = frame_map()->c_calling_convention(signature);
  1.2484 +
  1.2485 +  assert(cc->length() == args->length(), "argument mismatch");
  1.2486 +  for (int i = 0; i < args->length(); i++) {
  1.2487 +    LIRItem* arg = args->at(i);
  1.2488 +    LIR_Opr loc = cc->at(i);
  1.2489 +    if (loc->is_register()) {
  1.2490 +      arg->load_item_force(loc);
  1.2491 +    } else {
  1.2492 +      LIR_Address* addr = loc->as_address_ptr();
  1.2493 +      arg->load_for_store(addr->type());
  1.2494 +      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
  1.2495 +        __ unaligned_move(arg->result(), addr);
  1.2496 +      } else {
  1.2497 +        __ move(arg->result(), addr);
  1.2498 +      }
  1.2499 +    }
  1.2500 +  }
  1.2501 +
  1.2502 +  if (info) {
  1.2503 +    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  1.2504 +  } else {
  1.2505 +    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  1.2506 +  }
  1.2507 +  if (result->is_valid()) {
  1.2508 +    __ move(phys_reg, result);
  1.2509 +  }
  1.2510 +  return result;
  1.2511 +}
  1.2512 +
  1.2513 +
  1.2514 +
  1.2515 +void LIRGenerator::increment_invocation_counter(CodeEmitInfo* info, bool backedge) {
  1.2516 +#ifdef TIERED
  1.2517 +  if (_compilation->env()->comp_level() == CompLevel_fast_compile &&
  1.2518 +      (method()->code_size() >= Tier1BytecodeLimit || backedge)) {
  1.2519 +    int limit = InvocationCounter::Tier1InvocationLimit;
  1.2520 +    int offset = in_bytes(methodOopDesc::invocation_counter_offset() +
  1.2521 +                          InvocationCounter::counter_offset());
  1.2522 +    if (backedge) {
  1.2523 +      limit = InvocationCounter::Tier1BackEdgeLimit;
  1.2524 +      offset = in_bytes(methodOopDesc::backedge_counter_offset() +
  1.2525 +                        InvocationCounter::counter_offset());
  1.2526 +    }
  1.2527 +
  1.2528 +    LIR_Opr meth = new_register(T_OBJECT);
  1.2529 +    __ oop2reg(method()->encoding(), meth);
  1.2530 +    LIR_Opr result = increment_and_return_counter(meth, offset, InvocationCounter::count_increment);
  1.2531 +    __ cmp(lir_cond_aboveEqual, result, LIR_OprFact::intConst(limit));
  1.2532 +    CodeStub* overflow = new CounterOverflowStub(info, info->bci());
  1.2533 +    __ branch(lir_cond_aboveEqual, T_INT, overflow);
  1.2534 +    __ branch_destination(overflow->continuation());
  1.2535 +  }
  1.2536 +#endif
  1.2537 +}
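// [Editorial sketch of the TIERED path above] The method's invocation (or
// backedge) counter is bumped by count_increment and compared against the
// tier-1 limit; on overflow, control takes the out-of-line
// CounterOverflowStub and then resumes at its continuation:
//
//   counter += InvocationCounter::count_increment;
//   if (counter >= limit)  goto CounterOverflowStub;   // slow path
//   continuation: ...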
