src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp

changeset 0:f90c822e73f8
child     6876:710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,1260 @@
     1.4 +/*
     1.5 + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#include "precompiled.hpp"
    1.29 +#include "c1/c1_Compilation.hpp"
    1.30 +#include "c1/c1_FrameMap.hpp"
    1.31 +#include "c1/c1_Instruction.hpp"
    1.32 +#include "c1/c1_LIRAssembler.hpp"
    1.33 +#include "c1/c1_LIRGenerator.hpp"
    1.34 +#include "c1/c1_Runtime1.hpp"
    1.35 +#include "c1/c1_ValueStack.hpp"
    1.36 +#include "ci/ciArray.hpp"
    1.37 +#include "ci/ciObjArrayKlass.hpp"
    1.38 +#include "ci/ciTypeArrayKlass.hpp"
    1.39 +#include "runtime/sharedRuntime.hpp"
    1.40 +#include "runtime/stubRoutines.hpp"
    1.41 +#include "vmreg_sparc.inline.hpp"
    1.42 +
    1.43 +#ifdef ASSERT
    1.44 +#define __ gen()->lir(__FILE__, __LINE__)->
    1.45 +#else
    1.46 +#define __ gen()->lir()->
    1.47 +#endif
    1.48 +
    1.49 +void LIRItem::load_byte_item() {
    1.50 +  // byte loads use same registers as other loads
    1.51 +  load_item();
    1.52 +}
    1.53 +
    1.54 +
    1.55 +void LIRItem::load_nonconstant() {
    1.56 +  LIR_Opr r = value()->operand();
    1.57 +  if (_gen->can_inline_as_constant(value())) {
    1.58 +    if (!r->is_constant()) {
    1.59 +      r = LIR_OprFact::value_type(value()->type());
    1.60 +    }
    1.61 +    _result = r;
    1.62 +  } else {
    1.63 +    load_item();
    1.64 +  }
    1.65 +}
    1.66 +
    1.67 +
    1.68 +//--------------------------------------------------------------
    1.69 +//               LIRGenerator
    1.70 +//--------------------------------------------------------------
    1.71 +
    1.72 +LIR_Opr LIRGenerator::exceptionOopOpr()              { return FrameMap::Oexception_opr;  }
    1.73 +LIR_Opr LIRGenerator::exceptionPcOpr()               { return FrameMap::Oissuing_pc_opr; }
    1.74 +LIR_Opr LIRGenerator::syncTempOpr()                  { return new_register(T_OBJECT); }
    1.75 +LIR_Opr LIRGenerator::getThreadTemp()                { return rlock_callee_saved(T_INT); }
    1.76 +
    1.77 +LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
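          +  // Note: with SPARC register windows the callee sees the caller's O registers
          +  // as its own I registers, so the same result lives in I0 from the callee's
          +  // point of view and in O0 from the caller's point of view.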
    1.78 +  LIR_Opr opr;
    1.79 +  switch (type->tag()) {
    1.80 +  case intTag:     opr = callee ? FrameMap::I0_opr      : FrameMap::O0_opr;       break;
    1.81 +  case objectTag:  opr = callee ? FrameMap::I0_oop_opr  : FrameMap::O0_oop_opr;   break;
    1.82 +  case longTag:    opr = callee ? FrameMap::in_long_opr : FrameMap::out_long_opr; break;
    1.83 +  case floatTag:   opr = FrameMap::F0_opr;                                        break;
    1.84 +  case doubleTag:  opr = FrameMap::F0_double_opr;                                 break;
    1.85 +
    1.86 +  case addressTag:
    1.87 +  default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
    1.88 +  }
    1.89 +
    1.90 +  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
    1.91 +  return opr;
    1.92 +}
    1.93 +
    1.94 +LIR_Opr LIRGenerator::rlock_callee_saved(BasicType type) {
    1.95 +  LIR_Opr reg = new_register(type);
    1.96 +  set_vreg_flag(reg, callee_saved);
    1.97 +  return reg;
    1.98 +}
    1.99 +
   1.100 +
   1.101 +LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
   1.102 +  return new_register(T_INT);
   1.103 +}
   1.104 +
   1.105 +
   1.106 +
   1.107 +
   1.108 +
   1.109 +//--------- loading items into registers --------------------------------
   1.110 +
   1.111 +// SPARC cannot inline all constants
   1.112 +bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
   1.113 +  if (v->type()->as_IntConstant() != NULL) {
   1.114 +    return v->type()->as_IntConstant()->value() == 0;
   1.115 +  } else if (v->type()->as_LongConstant() != NULL) {
   1.116 +    return v->type()->as_LongConstant()->value() == 0L;
   1.117 +  } else if (v->type()->as_ObjectConstant() != NULL) {
   1.118 +    return v->type()->as_ObjectConstant()->value()->is_null_object();
   1.119 +  } else {
   1.120 +    return false;
   1.121 +  }
   1.122 +}
   1.123 +
   1.124 +
   1.125 +// only simm13 constants can be inlined
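          +// (simm13 is the 13-bit signed immediate field of SPARC arithmetic and
          +// memory instructions, i.e. values in the range [-4096, 4095]; anything
          +// outside that range has to be materialized in a register first)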
    1.126 +bool LIRGenerator::can_inline_as_constant(Value i) const {
   1.127 +  if (i->type()->as_IntConstant() != NULL) {
   1.128 +    return Assembler::is_simm13(i->type()->as_IntConstant()->value());
   1.129 +  } else {
   1.130 +    return can_store_as_constant(i, as_BasicType(i->type()));
   1.131 +  }
   1.132 +}
   1.133 +
   1.134 +
    1.135 +bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
   1.136 +  if (c->type() == T_INT) {
   1.137 +    return Assembler::is_simm13(c->as_jint());
   1.138 +  }
   1.139 +  return false;
   1.140 +}
   1.141 +
   1.142 +
   1.143 +LIR_Opr LIRGenerator::safepoint_poll_register() {
   1.144 +  return new_register(T_INT);
   1.145 +}
   1.146 +
   1.147 +
   1.148 +
   1.149 +LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
   1.150 +                                            int shift, int disp, BasicType type) {
   1.151 +  assert(base->is_register(), "must be");
   1.152 +
   1.153 +  // accumulate fixed displacements
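          +  // (e.g. a constant index of 8 with shift == 3 and disp == 4 folds into
          +  // disp = (8 << 3) + 4 = 68 and the index operand is dropped)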
   1.154 +  if (index->is_constant()) {
   1.155 +    disp += index->as_constant_ptr()->as_jint() << shift;
   1.156 +    index = LIR_OprFact::illegalOpr;
   1.157 +  }
   1.158 +
   1.159 +  if (index->is_register()) {
   1.160 +    // apply the shift and accumulate the displacement
   1.161 +    if (shift > 0) {
   1.162 +      LIR_Opr tmp = new_pointer_register();
   1.163 +      __ shift_left(index, shift, tmp);
   1.164 +      index = tmp;
   1.165 +    }
   1.166 +    if (disp != 0) {
   1.167 +      LIR_Opr tmp = new_pointer_register();
   1.168 +      if (Assembler::is_simm13(disp)) {
    1.169 +        __ add(index, LIR_OprFact::intptrConst(disp), tmp);
   1.170 +        index = tmp;
   1.171 +      } else {
   1.172 +        __ move(LIR_OprFact::intptrConst(disp), tmp);
   1.173 +        __ add(tmp, index, tmp);
   1.174 +        index = tmp;
   1.175 +      }
   1.176 +      disp = 0;
   1.177 +    }
   1.178 +  } else if (disp != 0 && !Assembler::is_simm13(disp)) {
   1.179 +    // index is illegal so replace it with the displacement loaded into a register
   1.180 +    index = new_pointer_register();
   1.181 +    __ move(LIR_OprFact::intptrConst(disp), index);
   1.182 +    disp = 0;
   1.183 +  }
   1.184 +
   1.185 +  // at this point we either have base + index or base + displacement
   1.186 +  if (disp == 0) {
   1.187 +    return new LIR_Address(base, index, type);
   1.188 +  } else {
   1.189 +    assert(Assembler::is_simm13(disp), "must be");
   1.190 +    return new LIR_Address(base, disp, type);
   1.191 +  }
   1.192 +}
   1.193 +
   1.194 +
   1.195 +LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
   1.196 +                                              BasicType type, bool needs_card_mark) {
   1.197 +  int elem_size = type2aelembytes(type);
   1.198 +  int shift = exact_log2(elem_size);
   1.199 +
   1.200 +  LIR_Opr base_opr;
   1.201 +  int offset = arrayOopDesc::base_offset_in_bytes(type);
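          +  // The element address is array + base_offset_in_bytes(type) + index * elem_size;
          +  // elem_size is a power of two, so the scaling is done with a left shift.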
   1.202 +
   1.203 +  if (index_opr->is_constant()) {
   1.204 +    int i = index_opr->as_constant_ptr()->as_jint();
   1.205 +    int array_offset = i * elem_size;
   1.206 +    if (Assembler::is_simm13(array_offset + offset)) {
   1.207 +      base_opr = array_opr;
   1.208 +      offset = array_offset + offset;
   1.209 +    } else {
   1.210 +      base_opr = new_pointer_register();
   1.211 +      if (Assembler::is_simm13(array_offset)) {
   1.212 +        __ add(array_opr, LIR_OprFact::intptrConst(array_offset), base_opr);
   1.213 +      } else {
   1.214 +        __ move(LIR_OprFact::intptrConst(array_offset), base_opr);
   1.215 +        __ add(base_opr, array_opr, base_opr);
   1.216 +      }
   1.217 +    }
   1.218 +  } else {
   1.219 +#ifdef _LP64
   1.220 +    if (index_opr->type() == T_INT) {
   1.221 +      LIR_Opr tmp = new_register(T_LONG);
   1.222 +      __ convert(Bytecodes::_i2l, index_opr, tmp);
   1.223 +      index_opr = tmp;
   1.224 +    }
   1.225 +#endif
   1.226 +
   1.227 +    base_opr = new_pointer_register();
   1.228 +    assert (index_opr->is_register(), "Must be register");
   1.229 +    if (shift > 0) {
   1.230 +      __ shift_left(index_opr, shift, base_opr);
   1.231 +      __ add(base_opr, array_opr, base_opr);
   1.232 +    } else {
   1.233 +      __ add(index_opr, array_opr, base_opr);
   1.234 +    }
   1.235 +  }
   1.236 +  if (needs_card_mark) {
   1.237 +    LIR_Opr ptr = new_pointer_register();
   1.238 +    __ add(base_opr, LIR_OprFact::intptrConst(offset), ptr);
   1.239 +    return new LIR_Address(ptr, type);
   1.240 +  } else {
   1.241 +    return new LIR_Address(base_opr, offset, type);
   1.242 +  }
   1.243 +}
   1.244 +
   1.245 +LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
   1.246 +  LIR_Opr r;
   1.247 +  if (type == T_LONG) {
   1.248 +    r = LIR_OprFact::longConst(x);
   1.249 +  } else if (type == T_INT) {
   1.250 +    r = LIR_OprFact::intConst(x);
   1.251 +  } else {
   1.252 +    ShouldNotReachHere();
   1.253 +  }
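          +  // Constants outside the simm13 range cannot be encoded as an immediate
          +  // operand, so they are materialized in a register here.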
   1.254 +  if (!Assembler::is_simm13(x)) {
   1.255 +    LIR_Opr tmp = new_register(type);
   1.256 +    __ move(r, tmp);
   1.257 +    return tmp;
   1.258 +  }
   1.259 +  return r;
   1.260 +}
   1.261 +
   1.262 +void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
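          +  // An arbitrary absolute counter address does not fit in a 13-bit
          +  // displacement, so it is loaded into a pointer register first.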
   1.263 +  LIR_Opr pointer = new_pointer_register();
   1.264 +  __ move(LIR_OprFact::intptrConst(counter), pointer);
   1.265 +  LIR_Address* addr = new LIR_Address(pointer, type);
   1.266 +  increment_counter(addr, step);
   1.267 +}
   1.268 +
   1.269 +void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
   1.270 +  LIR_Opr temp = new_register(addr->type());
   1.271 +  __ move(addr, temp);
   1.272 +  __ add(temp, load_immediate(step, addr->type()), temp);
   1.273 +  __ move(temp, addr);
   1.274 +}
   1.275 +
   1.276 +void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
   1.277 +  LIR_Opr o7opr = FrameMap::O7_opr;
   1.278 +  __ load(new LIR_Address(base, disp, T_INT), o7opr, info);
   1.279 +  __ cmp(condition, o7opr, c);
   1.280 +}
   1.281 +
   1.282 +
   1.283 +void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
   1.284 +  LIR_Opr o7opr = FrameMap::O7_opr;
   1.285 +  __ load(new LIR_Address(base, disp, type), o7opr, info);
   1.286 +  __ cmp(condition, reg, o7opr);
   1.287 +}
   1.288 +
   1.289 +
   1.290 +void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, LIR_Opr disp, BasicType type, CodeEmitInfo* info) {
   1.291 +  LIR_Opr o7opr = FrameMap::O7_opr;
   1.292 +  __ load(new LIR_Address(base, disp, type), o7opr, info);
   1.293 +  __ cmp(condition, reg, o7opr);
   1.294 +}
   1.295 +
   1.296 +
   1.297 +bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
   1.298 +  assert(left != result, "should be different registers");
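          +  // e.g. left * 7 becomes (left << 3) - left (c + 1 == 8 is a power of two)
          +  // and left * 9 becomes (left << 3) + left (c - 1 == 8 is a power of two).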
   1.299 +  if (is_power_of_2(c + 1)) {
   1.300 +    __ shift_left(left, log2_intptr(c + 1), result);
   1.301 +    __ sub(result, left, result);
   1.302 +    return true;
   1.303 +  } else if (is_power_of_2(c - 1)) {
   1.304 +    __ shift_left(left, log2_intptr(c - 1), result);
   1.305 +    __ add(result, left, result);
   1.306 +    return true;
   1.307 +  }
   1.308 +  return false;
   1.309 +}
   1.310 +
   1.311 +
   1.312 +void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
   1.313 +  BasicType t = item->type();
   1.314 +  LIR_Opr sp_opr = FrameMap::SP_opr;
   1.315 +  if ((t == T_LONG || t == T_DOUBLE) &&
   1.316 +      ((in_bytes(offset_from_sp) - STACK_BIAS) % 8 != 0)) {
   1.317 +    __ unaligned_move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
   1.318 +  } else {
   1.319 +    __ move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
   1.320 +  }
   1.321 +}
   1.322 +
   1.323 +//----------------------------------------------------------------------
   1.324 +//             visitor functions
   1.325 +//----------------------------------------------------------------------
   1.326 +
   1.327 +
   1.328 +void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
   1.329 +  assert(x->is_pinned(),"");
   1.330 +  bool needs_range_check = x->compute_needs_range_check();
   1.331 +  bool use_length = x->length() != NULL;
   1.332 +  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
   1.333 +  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
   1.334 +                                         !get_jobject_constant(x->value())->is_null_object() ||
   1.335 +                                         x->should_profile());
   1.336 +
   1.337 +  LIRItem array(x->array(), this);
   1.338 +  LIRItem index(x->index(), this);
   1.339 +  LIRItem value(x->value(), this);
   1.340 +  LIRItem length(this);
   1.341 +
   1.342 +  array.load_item();
   1.343 +  index.load_nonconstant();
   1.344 +
   1.345 +  if (use_length && needs_range_check) {
   1.346 +    length.set_instruction(x->length());
   1.347 +    length.load_item();
   1.348 +  }
   1.349 +  if (needs_store_check) {
   1.350 +    value.load_item();
   1.351 +  } else {
   1.352 +    value.load_for_store(x->elt_type());
   1.353 +  }
   1.354 +
   1.355 +  set_no_result(x);
   1.356 +
   1.357 +  // the CodeEmitInfo must be duplicated for each different
   1.358 +  // LIR-instruction because spilling can occur anywhere between two
   1.359 +  // instructions and so the debug information must be different
   1.360 +  CodeEmitInfo* range_check_info = state_for(x);
   1.361 +  CodeEmitInfo* null_check_info = NULL;
   1.362 +  if (x->needs_null_check()) {
   1.363 +    null_check_info = new CodeEmitInfo(range_check_info);
   1.364 +  }
   1.365 +
   1.366 +  // emit array address setup early so it schedules better
   1.367 +  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
   1.368 +
   1.369 +  if (GenerateRangeChecks && needs_range_check) {
   1.370 +    if (use_length) {
   1.371 +      __ cmp(lir_cond_belowEqual, length.result(), index.result());
   1.372 +      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
   1.373 +    } else {
   1.374 +      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
   1.375 +      // range_check also does the null check
   1.376 +      null_check_info = NULL;
   1.377 +    }
   1.378 +  }
   1.379 +
   1.380 +  if (GenerateArrayStoreCheck && needs_store_check) {
   1.381 +    LIR_Opr tmp1 = FrameMap::G1_opr;
   1.382 +    LIR_Opr tmp2 = FrameMap::G3_opr;
   1.383 +    LIR_Opr tmp3 = FrameMap::G5_opr;
   1.384 +
   1.385 +    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
   1.386 +    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
   1.387 +  }
   1.388 +
   1.389 +  if (obj_store) {
   1.390 +    // Needs GC write barriers.
   1.391 +    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
   1.392 +                true /* do_load */, false /* patch */, NULL);
   1.393 +  }
   1.394 +  __ move(value.result(), array_addr, null_check_info);
   1.395 +  if (obj_store) {
   1.396 +    // Precise card mark
   1.397 +    post_barrier(LIR_OprFact::address(array_addr), value.result());
   1.398 +  }
   1.399 +}
   1.400 +
   1.401 +
   1.402 +void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
   1.403 +  assert(x->is_pinned(),"");
   1.404 +  LIRItem obj(x->obj(), this);
   1.405 +  obj.load_item();
   1.406 +
   1.407 +  set_no_result(x);
   1.408 +
   1.409 +  LIR_Opr lock    = FrameMap::G1_opr;
   1.410 +  LIR_Opr scratch = FrameMap::G3_opr;
   1.411 +  LIR_Opr hdr     = FrameMap::G4_opr;
   1.412 +
   1.413 +  CodeEmitInfo* info_for_exception = NULL;
   1.414 +  if (x->needs_null_check()) {
   1.415 +    info_for_exception = state_for(x);
   1.416 +  }
   1.417 +
   1.418 +  // this CodeEmitInfo must not have the xhandlers because here the
   1.419 +  // object is already locked (xhandlers expects object to be unlocked)
   1.420 +  CodeEmitInfo* info = state_for(x, x->state(), true);
   1.421 +  monitor_enter(obj.result(), lock, hdr, scratch, x->monitor_no(), info_for_exception, info);
   1.422 +}
   1.423 +
   1.424 +
   1.425 +void LIRGenerator::do_MonitorExit(MonitorExit* x) {
   1.426 +  assert(x->is_pinned(),"");
   1.427 +  LIRItem obj(x->obj(), this);
   1.428 +  obj.dont_load_item();
   1.429 +
   1.430 +  set_no_result(x);
   1.431 +  LIR_Opr lock      = FrameMap::G1_opr;
   1.432 +  LIR_Opr hdr       = FrameMap::G3_opr;
   1.433 +  LIR_Opr obj_temp  = FrameMap::G4_opr;
   1.434 +  monitor_exit(obj_temp, lock, hdr, LIR_OprFact::illegalOpr, x->monitor_no());
   1.435 +}
   1.436 +
   1.437 +
   1.438 +// _ineg, _lneg, _fneg, _dneg
   1.439 +void LIRGenerator::do_NegateOp(NegateOp* x) {
   1.440 +  LIRItem value(x->x(), this);
   1.441 +  value.load_item();
   1.442 +  LIR_Opr reg = rlock_result(x);
   1.443 +  __ negate(value.result(), reg);
   1.444 +}
   1.445 +
   1.446 +
   1.447 +
   1.448 +// for  _fadd, _fmul, _fsub, _fdiv, _frem
   1.449 +//      _dadd, _dmul, _dsub, _ddiv, _drem
   1.450 +void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
   1.451 +  switch (x->op()) {
   1.452 +  case Bytecodes::_fadd:
   1.453 +  case Bytecodes::_fmul:
   1.454 +  case Bytecodes::_fsub:
   1.455 +  case Bytecodes::_fdiv:
   1.456 +  case Bytecodes::_dadd:
   1.457 +  case Bytecodes::_dmul:
   1.458 +  case Bytecodes::_dsub:
   1.459 +  case Bytecodes::_ddiv: {
   1.460 +    LIRItem left(x->x(), this);
   1.461 +    LIRItem right(x->y(), this);
   1.462 +    left.load_item();
   1.463 +    right.load_item();
   1.464 +    rlock_result(x);
   1.465 +    arithmetic_op_fpu(x->op(), x->operand(), left.result(), right.result(), x->is_strictfp());
   1.466 +  }
   1.467 +  break;
   1.468 +
   1.469 +  case Bytecodes::_frem:
   1.470 +  case Bytecodes::_drem: {
   1.471 +    address entry;
   1.472 +    switch (x->op()) {
   1.473 +    case Bytecodes::_frem:
   1.474 +      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
   1.475 +      break;
   1.476 +    case Bytecodes::_drem:
   1.477 +      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
   1.478 +      break;
   1.479 +    default:
   1.480 +      ShouldNotReachHere();
   1.481 +    }
   1.482 +    LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), NULL);
   1.483 +    set_result(x, result);
   1.484 +  }
   1.485 +  break;
   1.486 +
   1.487 +  default: ShouldNotReachHere();
   1.488 +  }
   1.489 +}
   1.490 +
   1.491 +
   1.492 +// for  _ladd, _lmul, _lsub, _ldiv, _lrem
   1.493 +void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
   1.494 +  switch (x->op()) {
   1.495 +  case Bytecodes::_lrem:
   1.496 +  case Bytecodes::_lmul:
   1.497 +  case Bytecodes::_ldiv: {
   1.498 +
   1.499 +    if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
   1.500 +      LIRItem right(x->y(), this);
   1.501 +      right.load_item();
   1.502 +
   1.503 +      CodeEmitInfo* info = state_for(x);
   1.504 +      LIR_Opr item = right.result();
   1.505 +      assert(item->is_register(), "must be");
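          +      // x->y() is the divisor; a zero divisor is trapped here through a
          +      // DivByZeroStub before the runtime helper is called.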
   1.506 +      __ cmp(lir_cond_equal, item, LIR_OprFact::longConst(0));
   1.507 +      __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));
   1.508 +    }
   1.509 +
   1.510 +    address entry;
   1.511 +    switch (x->op()) {
   1.512 +    case Bytecodes::_lrem:
   1.513 +      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
    1.514 +      break; // the zero-divisor check is emitted above
   1.515 +    case Bytecodes::_ldiv:
   1.516 +      entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
    1.517 +      break; // the zero-divisor check is emitted above
   1.518 +    case Bytecodes::_lmul:
   1.519 +      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul);
   1.520 +      break;
   1.521 +    default:
   1.522 +      ShouldNotReachHere();
   1.523 +    }
   1.524 +
   1.525 +    // order of arguments to runtime call is reversed.
   1.526 +    LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL);
   1.527 +    set_result(x, result);
   1.528 +    break;
   1.529 +  }
   1.530 +  case Bytecodes::_ladd:
   1.531 +  case Bytecodes::_lsub: {
   1.532 +    LIRItem left(x->x(), this);
   1.533 +    LIRItem right(x->y(), this);
   1.534 +    left.load_item();
   1.535 +    right.load_item();
   1.536 +    rlock_result(x);
   1.537 +
   1.538 +    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
   1.539 +    break;
   1.540 +  }
   1.541 +  default: ShouldNotReachHere();
   1.542 +  }
   1.543 +}
   1.544 +
   1.545 +
    1.546 +// Returns true if item is an int constant that can be represented as a simm13
   1.547 +static bool is_simm13(LIR_Opr item) {
   1.548 +  if (item->is_constant() && item->type() == T_INT) {
   1.549 +    return Assembler::is_simm13(item->as_constant_ptr()->as_jint());
   1.550 +  } else {
   1.551 +    return false;
   1.552 +  }
   1.553 +}
   1.554 +
   1.555 +
   1.556 +// for: _iadd, _imul, _isub, _idiv, _irem
   1.557 +void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
   1.558 +  bool is_div_rem = x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem;
   1.559 +  LIRItem left(x->x(), this);
   1.560 +  LIRItem right(x->y(), this);
    1.561 +  // missing test whether the instruction is commutative and whether the operands should be swapped
   1.562 +  right.load_nonconstant();
   1.563 +  assert(right.is_constant() || right.is_register(), "wrong state of right");
   1.564 +  left.load_item();
   1.565 +  rlock_result(x);
   1.566 +  if (is_div_rem) {
   1.567 +    CodeEmitInfo* info = state_for(x);
   1.568 +    LIR_Opr tmp = FrameMap::G1_opr;
   1.569 +    if (x->op() == Bytecodes::_irem) {
   1.570 +      __ irem(left.result(), right.result(), x->operand(), tmp, info);
   1.571 +    } else if (x->op() == Bytecodes::_idiv) {
   1.572 +      __ idiv(left.result(), right.result(), x->operand(), tmp, info);
   1.573 +    }
   1.574 +  } else {
   1.575 +    arithmetic_op_int(x->op(), x->operand(), left.result(), right.result(), FrameMap::G1_opr);
   1.576 +  }
   1.577 +}
   1.578 +
   1.579 +
   1.580 +void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
   1.581 +  ValueTag tag = x->type()->tag();
   1.582 +  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
   1.583 +  switch (tag) {
   1.584 +    case floatTag:
   1.585 +    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
   1.586 +    case longTag:    do_ArithmeticOp_Long(x); return;
   1.587 +    case intTag:     do_ArithmeticOp_Int(x);  return;
   1.588 +  }
   1.589 +  ShouldNotReachHere();
   1.590 +}
   1.591 +
   1.592 +
   1.593 +// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
   1.594 +void LIRGenerator::do_ShiftOp(ShiftOp* x) {
   1.595 +  LIRItem value(x->x(), this);
   1.596 +  LIRItem count(x->y(), this);
   1.597 +  // Long shift destroys count register
   1.598 +  if (value.type()->is_long()) {
   1.599 +    count.set_destroys_register();
   1.600 +  }
   1.601 +  value.load_item();
   1.602 +  // the old backend doesn't support this
   1.603 +  if (count.is_constant() && count.type()->as_IntConstant() != NULL && value.type()->is_int()) {
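          +    // for 32-bit shifts only the low five bits of the count are used,
          +    // as required by the JVM spec for ishl/ishr/iushr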
   1.604 +    jint c = count.get_jint_constant() & 0x1f;
   1.605 +    assert(c >= 0 && c < 32, "should be small");
   1.606 +    count.dont_load_item();
   1.607 +  } else {
   1.608 +    count.load_item();
   1.609 +  }
   1.610 +  LIR_Opr reg = rlock_result(x);
   1.611 +  shift_op(x->op(), reg, value.result(), count.result(), LIR_OprFact::illegalOpr);
   1.612 +}
   1.613 +
   1.614 +
   1.615 +// _iand, _land, _ior, _lor, _ixor, _lxor
   1.616 +void LIRGenerator::do_LogicOp(LogicOp* x) {
   1.617 +  LIRItem left(x->x(), this);
   1.618 +  LIRItem right(x->y(), this);
   1.619 +
   1.620 +  left.load_item();
   1.621 +  right.load_nonconstant();
   1.622 +  LIR_Opr reg = rlock_result(x);
   1.623 +
   1.624 +  logic_op(x->op(), reg, left.result(), right.result());
   1.625 +}
   1.626 +
   1.627 +
   1.628 +
   1.629 +// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
   1.630 +void LIRGenerator::do_CompareOp(CompareOp* x) {
   1.631 +  LIRItem left(x->x(), this);
   1.632 +  LIRItem right(x->y(), this);
   1.633 +  left.load_item();
   1.634 +  right.load_item();
   1.635 +  LIR_Opr reg = rlock_result(x);
   1.636 +  if (x->x()->type()->is_float_kind()) {
   1.637 +    Bytecodes::Code code = x->op();
   1.638 +    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
   1.639 +  } else if (x->x()->type()->tag() == longTag) {
   1.640 +    __ lcmp2int(left.result(), right.result(), reg);
   1.641 +  } else {
   1.642 +    Unimplemented();
   1.643 +  }
   1.644 +}
   1.645 +
   1.646 +
   1.647 +void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
   1.648 +  assert(x->number_of_arguments() == 4, "wrong type");
   1.649 +  LIRItem obj   (x->argument_at(0), this);  // object
   1.650 +  LIRItem offset(x->argument_at(1), this);  // offset of field
   1.651 +  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
   1.652 +  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
   1.653 +
   1.654 +  // Use temps to avoid kills
   1.655 +  LIR_Opr t1 = FrameMap::G1_opr;
   1.656 +  LIR_Opr t2 = FrameMap::G3_opr;
   1.657 +  LIR_Opr addr = new_pointer_register();
   1.658 +
   1.659 +  // get address of field
   1.660 +  obj.load_item();
   1.661 +  offset.load_item();
   1.662 +  cmp.load_item();
   1.663 +  val.load_item();
   1.664 +
   1.665 +  __ add(obj.result(), offset.result(), addr);
   1.666 +
   1.667 +  if (type == objectType) {  // Write-barrier needed for Object fields.
   1.668 +    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
   1.669 +                true /* do_load */, false /* patch */, NULL);
   1.670 +  }
   1.671 +
   1.672 +  if (type == objectType)
   1.673 +    __ cas_obj(addr, cmp.result(), val.result(), t1, t2);
   1.674 +  else if (type == intType)
   1.675 +    __ cas_int(addr, cmp.result(), val.result(), t1, t2);
   1.676 +  else if (type == longType)
   1.677 +    __ cas_long(addr, cmp.result(), val.result(), t1, t2);
   1.678 +  else {
   1.679 +    ShouldNotReachHere();
   1.680 +  }
   1.681 +  // generate conditional move of boolean result
   1.682 +  LIR_Opr result = rlock_result(x);
   1.683 +  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
   1.684 +           result, as_BasicType(type));
   1.685 +  if (type == objectType) {  // Write-barrier needed for Object fields.
   1.686 +    // Precise card mark since could either be object or array
   1.687 +    post_barrier(addr, val.result());
   1.688 +  }
   1.689 +}
   1.690 +
   1.691 +
   1.692 +void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
   1.693 +  switch (x->id()) {
   1.694 +    case vmIntrinsics::_dabs:
   1.695 +    case vmIntrinsics::_dsqrt: {
   1.696 +      assert(x->number_of_arguments() == 1, "wrong type");
   1.697 +      LIRItem value(x->argument_at(0), this);
   1.698 +      value.load_item();
   1.699 +      LIR_Opr dst = rlock_result(x);
   1.700 +
   1.701 +      switch (x->id()) {
   1.702 +      case vmIntrinsics::_dsqrt: {
   1.703 +        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
   1.704 +        break;
   1.705 +      }
   1.706 +      case vmIntrinsics::_dabs: {
   1.707 +        __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
   1.708 +        break;
   1.709 +      }
   1.710 +      }
   1.711 +      break;
   1.712 +    }
   1.713 +    case vmIntrinsics::_dlog10: // fall through
   1.714 +    case vmIntrinsics::_dlog: // fall through
   1.715 +    case vmIntrinsics::_dsin: // fall through
   1.716 +    case vmIntrinsics::_dtan: // fall through
   1.717 +    case vmIntrinsics::_dcos: // fall through
   1.718 +    case vmIntrinsics::_dexp: {
   1.719 +      assert(x->number_of_arguments() == 1, "wrong type");
   1.720 +
   1.721 +      address runtime_entry = NULL;
   1.722 +      switch (x->id()) {
   1.723 +      case vmIntrinsics::_dsin:
   1.724 +        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
   1.725 +        break;
   1.726 +      case vmIntrinsics::_dcos:
   1.727 +        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
   1.728 +        break;
   1.729 +      case vmIntrinsics::_dtan:
   1.730 +        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
   1.731 +        break;
   1.732 +      case vmIntrinsics::_dlog:
   1.733 +        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
   1.734 +        break;
   1.735 +      case vmIntrinsics::_dlog10:
   1.736 +        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
   1.737 +        break;
   1.738 +      case vmIntrinsics::_dexp:
   1.739 +        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
   1.740 +        break;
   1.741 +      default:
   1.742 +        ShouldNotReachHere();
   1.743 +      }
   1.744 +
   1.745 +      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
   1.746 +      set_result(x, result);
   1.747 +      break;
   1.748 +    }
   1.749 +    case vmIntrinsics::_dpow: {
   1.750 +      assert(x->number_of_arguments() == 2, "wrong type");
   1.751 +      address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
   1.752 +      LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
   1.753 +      set_result(x, result);
   1.754 +      break;
   1.755 +    }
   1.756 +  }
   1.757 +}
   1.758 +
   1.759 +
   1.760 +void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
   1.761 +  assert(x->number_of_arguments() == 5, "wrong type");
   1.762 +
   1.763 +  // Make all state_for calls early since they can emit code
   1.764 +  CodeEmitInfo* info = state_for(x, x->state());
   1.765 +
   1.766 +  // Note: spill caller save before setting the item
   1.767 +  LIRItem src     (x->argument_at(0), this);
   1.768 +  LIRItem src_pos (x->argument_at(1), this);
   1.769 +  LIRItem dst     (x->argument_at(2), this);
   1.770 +  LIRItem dst_pos (x->argument_at(3), this);
   1.771 +  LIRItem length  (x->argument_at(4), this);
   1.772 +  // load all values in callee_save_registers, as this makes the
   1.773 +  // parameter passing to the fast case simpler
   1.774 +  src.load_item_force     (rlock_callee_saved(T_OBJECT));
   1.775 +  src_pos.load_item_force (rlock_callee_saved(T_INT));
   1.776 +  dst.load_item_force     (rlock_callee_saved(T_OBJECT));
   1.777 +  dst_pos.load_item_force (rlock_callee_saved(T_INT));
   1.778 +  length.load_item_force  (rlock_callee_saved(T_INT));
   1.779 +
   1.780 +  int flags;
   1.781 +  ciArrayKlass* expected_type;
   1.782 +  arraycopy_helper(x, &flags, &expected_type);
   1.783 +
   1.784 +  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(),
   1.785 +               length.result(), rlock_callee_saved(T_INT),
   1.786 +               expected_type, flags, info);
   1.787 +  set_no_result(x);
   1.788 +}
   1.789 +
   1.790 +void LIRGenerator::do_update_CRC32(Intrinsic* x) {
   1.791 +  fatal("CRC32 intrinsic is not implemented on this platform");
   1.792 +}
   1.793 +
   1.794 +// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
   1.795 +// _i2b, _i2c, _i2s
   1.796 +void LIRGenerator::do_Convert(Convert* x) {
   1.797 +
   1.798 +  switch (x->op()) {
   1.799 +    case Bytecodes::_f2l:
   1.800 +    case Bytecodes::_d2l:
   1.801 +    case Bytecodes::_d2i:
   1.802 +    case Bytecodes::_l2f:
   1.803 +    case Bytecodes::_l2d: {
   1.804 +
   1.805 +      address entry;
   1.806 +      switch (x->op()) {
   1.807 +      case Bytecodes::_l2f:
   1.808 +        entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
   1.809 +        break;
   1.810 +      case Bytecodes::_l2d:
   1.811 +        entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2d);
   1.812 +        break;
   1.813 +      case Bytecodes::_f2l:
   1.814 +        entry = CAST_FROM_FN_PTR(address, SharedRuntime::f2l);
   1.815 +        break;
   1.816 +      case Bytecodes::_d2l:
   1.817 +        entry = CAST_FROM_FN_PTR(address, SharedRuntime::d2l);
   1.818 +        break;
   1.819 +      case Bytecodes::_d2i:
   1.820 +        entry = CAST_FROM_FN_PTR(address, SharedRuntime::d2i);
   1.821 +        break;
   1.822 +      default:
   1.823 +        ShouldNotReachHere();
   1.824 +      }
   1.825 +      LIR_Opr result = call_runtime(x->value(), entry, x->type(), NULL);
   1.826 +      set_result(x, result);
   1.827 +      break;
   1.828 +    }
   1.829 +
   1.830 +    case Bytecodes::_i2f:
   1.831 +    case Bytecodes::_i2d: {
   1.832 +      LIRItem value(x->value(), this);
   1.833 +
   1.834 +      LIR_Opr reg = rlock_result(x);
   1.835 +      // To convert an int to double, we need to load the 32-bit int
   1.836 +      // from memory into a single precision floating point register
   1.837 +      // (even numbered). Then the sparc fitod instruction takes care
   1.838 +      // of the conversion. This is a bit ugly, but is the best way to
   1.839 +      // get the int value in a single precision floating point register
   1.840 +      value.load_item();
   1.841 +      LIR_Opr tmp = force_to_spill(value.result(), T_FLOAT);
   1.842 +      __ convert(x->op(), tmp, reg);
   1.843 +      break;
   1.844 +    }
   1.845 +    break;
   1.846 +
   1.847 +    case Bytecodes::_i2l:
   1.848 +    case Bytecodes::_i2b:
   1.849 +    case Bytecodes::_i2c:
   1.850 +    case Bytecodes::_i2s:
   1.851 +    case Bytecodes::_l2i:
   1.852 +    case Bytecodes::_f2d:
   1.853 +    case Bytecodes::_d2f: { // inline code
   1.854 +      LIRItem value(x->value(), this);
   1.855 +
   1.856 +      value.load_item();
   1.857 +      LIR_Opr reg = rlock_result(x);
   1.858 +      __ convert(x->op(), value.result(), reg, false);
   1.859 +    }
   1.860 +    break;
   1.861 +
   1.862 +    case Bytecodes::_f2i: {
   1.863 +      LIRItem value (x->value(), this);
   1.864 +      value.set_destroys_register();
   1.865 +      value.load_item();
   1.866 +      LIR_Opr reg = rlock_result(x);
   1.867 +      set_vreg_flag(reg, must_start_in_memory);
   1.868 +      __ convert(x->op(), value.result(), reg, false);
   1.869 +    }
   1.870 +    break;
   1.871 +
   1.872 +    default: ShouldNotReachHere();
   1.873 +  }
   1.874 +}
   1.875 +
   1.876 +
   1.877 +void LIRGenerator::do_NewInstance(NewInstance* x) {
    1.878 +  // This instruction can be deoptimized in the slow path: use
    1.879 +  // O0 as the result register.
   1.880 +  const LIR_Opr reg = result_register_for(x->type());
   1.881 +#ifndef PRODUCT
   1.882 +  if (PrintNotLoaded && !x->klass()->is_loaded()) {
   1.883 +    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
   1.884 +  }
   1.885 +#endif
   1.886 +  CodeEmitInfo* info = state_for(x, x->state());
   1.887 +  LIR_Opr tmp1 = FrameMap::G1_oop_opr;
   1.888 +  LIR_Opr tmp2 = FrameMap::G3_oop_opr;
   1.889 +  LIR_Opr tmp3 = FrameMap::G4_oop_opr;
   1.890 +  LIR_Opr tmp4 = FrameMap::O1_oop_opr;
   1.891 +  LIR_Opr klass_reg = FrameMap::G5_metadata_opr;
   1.892 +  new_instance(reg, x->klass(), tmp1, tmp2, tmp3, tmp4, klass_reg, info);
   1.893 +  LIR_Opr result = rlock_result(x);
   1.894 +  __ move(reg, result);
   1.895 +}
   1.896 +
   1.897 +
   1.898 +void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
   1.899 +  // Evaluate state_for early since it may emit code
   1.900 +  CodeEmitInfo* info = state_for(x, x->state());
   1.901 +
   1.902 +  LIRItem length(x->length(), this);
   1.903 +  length.load_item();
   1.904 +
   1.905 +  LIR_Opr reg = result_register_for(x->type());
   1.906 +  LIR_Opr tmp1 = FrameMap::G1_oop_opr;
   1.907 +  LIR_Opr tmp2 = FrameMap::G3_oop_opr;
   1.908 +  LIR_Opr tmp3 = FrameMap::G4_oop_opr;
   1.909 +  LIR_Opr tmp4 = FrameMap::O1_oop_opr;
   1.910 +  LIR_Opr klass_reg = FrameMap::G5_metadata_opr;
   1.911 +  LIR_Opr len = length.result();
   1.912 +  BasicType elem_type = x->elt_type();
   1.913 +
   1.914 +  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
   1.915 +
   1.916 +  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
   1.917 +  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
   1.918 +
   1.919 +  LIR_Opr result = rlock_result(x);
   1.920 +  __ move(reg, result);
   1.921 +}
   1.922 +
   1.923 +
   1.924 +void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
   1.925 +  // Evaluate state_for early since it may emit code.
   1.926 +  CodeEmitInfo* info = state_for(x, x->state());
   1.927 +  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
   1.928 +  // and therefore provide the state before the parameters have been consumed
   1.929 +  CodeEmitInfo* patching_info = NULL;
   1.930 +  if (!x->klass()->is_loaded() || PatchALot) {
   1.931 +    patching_info = state_for(x, x->state_before());
   1.932 +  }
   1.933 +
   1.934 +  LIRItem length(x->length(), this);
   1.935 +  length.load_item();
   1.936 +
   1.937 +  const LIR_Opr reg = result_register_for(x->type());
   1.938 +  LIR_Opr tmp1 = FrameMap::G1_oop_opr;
   1.939 +  LIR_Opr tmp2 = FrameMap::G3_oop_opr;
   1.940 +  LIR_Opr tmp3 = FrameMap::G4_oop_opr;
   1.941 +  LIR_Opr tmp4 = FrameMap::O1_oop_opr;
   1.942 +  LIR_Opr klass_reg = FrameMap::G5_metadata_opr;
   1.943 +  LIR_Opr len = length.result();
   1.944 +
   1.945 +  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
   1.946 +  ciMetadata* obj = ciObjArrayKlass::make(x->klass());
   1.947 +  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
   1.948 +    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
   1.949 +  }
   1.950 +  klass2reg_with_patching(klass_reg, obj, patching_info);
   1.951 +  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
   1.952 +
   1.953 +  LIR_Opr result = rlock_result(x);
   1.954 +  __ move(reg, result);
   1.955 +}
   1.956 +
   1.957 +
   1.958 +void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
   1.959 +  Values* dims = x->dims();
   1.960 +  int i = dims->length();
   1.961 +  LIRItemList* items = new LIRItemList(dims->length(), NULL);
   1.962 +  while (i-- > 0) {
   1.963 +    LIRItem* size = new LIRItem(dims->at(i), this);
   1.964 +    items->at_put(i, size);
   1.965 +  }
   1.966 +
   1.967 +  // Evaluate state_for early since it may emit code.
   1.968 +  CodeEmitInfo* patching_info = NULL;
   1.969 +  if (!x->klass()->is_loaded() || PatchALot) {
   1.970 +    patching_info = state_for(x, x->state_before());
   1.971 +
   1.972 +    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
   1.973 +    // clone all handlers (NOTE: Usually this is handled transparently
   1.974 +    // by the CodeEmitInfo cloning logic in CodeStub constructors but
   1.975 +    // is done explicitly here because a stub isn't being used).
   1.976 +    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
   1.977 +  }
   1.978 +  CodeEmitInfo* info = state_for(x, x->state());
   1.979 +
   1.980 +  i = dims->length();
   1.981 +  while (i-- > 0) {
   1.982 +    LIRItem* size = items->at(i);
   1.983 +    size->load_item();
   1.984 +    store_stack_parameter (size->result(),
   1.985 +                           in_ByteSize(STACK_BIAS +
   1.986 +                                       frame::memory_parameter_word_sp_offset * wordSize +
   1.987 +                                       i * sizeof(jint)));
   1.988 +  }
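          +  // The dimension sizes now sit in the outgoing memory-parameter area; the
          +  // runtime call below passes the rank in O1 and a pointer to that area
          +  // (varargs, in O2) to Runtime1::new_multi_array_id.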
   1.989 +
    1.990 +  // This instruction can be deoptimized in the slow path: use
    1.991 +  // O0 as the result register.
   1.992 +  const LIR_Opr klass_reg = FrameMap::O0_metadata_opr;
   1.993 +  klass2reg_with_patching(klass_reg, x->klass(), patching_info);
   1.994 +  LIR_Opr rank = FrameMap::O1_opr;
   1.995 +  __ move(LIR_OprFact::intConst(x->rank()), rank);
   1.996 +  LIR_Opr varargs = FrameMap::as_pointer_opr(O2);
   1.997 +  int offset_from_sp = (frame::memory_parameter_word_sp_offset * wordSize) + STACK_BIAS;
   1.998 +  __ add(FrameMap::SP_opr,
   1.999 +         LIR_OprFact::intptrConst(offset_from_sp),
  1.1000 +         varargs);
  1.1001 +  LIR_OprList* args = new LIR_OprList(3);
  1.1002 +  args->append(klass_reg);
  1.1003 +  args->append(rank);
  1.1004 +  args->append(varargs);
  1.1005 +  const LIR_Opr reg = result_register_for(x->type());
  1.1006 +  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
  1.1007 +                  LIR_OprFact::illegalOpr,
  1.1008 +                  reg, args, info);
  1.1009 +
  1.1010 +  LIR_Opr result = rlock_result(x);
  1.1011 +  __ move(reg, result);
  1.1012 +}
  1.1013 +
  1.1014 +
  1.1015 +void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  1.1016 +}
  1.1017 +
  1.1018 +
  1.1019 +void LIRGenerator::do_CheckCast(CheckCast* x) {
  1.1020 +  LIRItem obj(x->obj(), this);
  1.1021 +  CodeEmitInfo* patching_info = NULL;
  1.1022 +  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
  1.1023 +    // must do this before locking the destination register as an oop register,
  1.1024 +    // and before the obj is loaded (so x->obj()->item() is valid for creating a debug info location)
  1.1025 +    patching_info = state_for(x, x->state_before());
  1.1026 +  }
  1.1027 +  obj.load_item();
  1.1028 +  LIR_Opr out_reg = rlock_result(x);
  1.1029 +  CodeStub* stub;
  1.1030 +  CodeEmitInfo* info_for_exception = state_for(x);
  1.1031 +
  1.1032 +  if (x->is_incompatible_class_change_check()) {
  1.1033 +    assert(patching_info == NULL, "can't patch this");
  1.1034 +    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  1.1035 +  } else {
  1.1036 +    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  1.1037 +  }
  1.1038 +  LIR_Opr tmp1 = FrameMap::G1_oop_opr;
  1.1039 +  LIR_Opr tmp2 = FrameMap::G3_oop_opr;
  1.1040 +  LIR_Opr tmp3 = FrameMap::G4_oop_opr;
  1.1041 +  __ checkcast(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
  1.1042 +               x->direct_compare(), info_for_exception, patching_info, stub,
  1.1043 +               x->profiled_method(), x->profiled_bci());
  1.1044 +}
  1.1045 +
  1.1046 +
  1.1047 +void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  1.1048 +  LIRItem obj(x->obj(), this);
  1.1049 +  CodeEmitInfo* patching_info = NULL;
  1.1050 +  if (!x->klass()->is_loaded() || PatchALot) {
  1.1051 +    patching_info = state_for(x, x->state_before());
  1.1052 +  }
  1.1053 +  // ensure the result register is not the input register because the result is initialized before the patching safepoint
  1.1054 +  obj.load_item();
  1.1055 +  LIR_Opr out_reg = rlock_result(x);
  1.1056 +  LIR_Opr tmp1 = FrameMap::G1_oop_opr;
  1.1057 +  LIR_Opr tmp2 = FrameMap::G3_oop_opr;
  1.1058 +  LIR_Opr tmp3 = FrameMap::G4_oop_opr;
  1.1059 +  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
  1.1060 +                x->direct_compare(), patching_info,
  1.1061 +                x->profiled_method(), x->profiled_bci());
  1.1062 +}
  1.1063 +
  1.1064 +
  1.1065 +void LIRGenerator::do_If(If* x) {
  1.1066 +  assert(x->number_of_sux() == 2, "inconsistency");
  1.1067 +  ValueTag tag = x->x()->type()->tag();
  1.1068 +  LIRItem xitem(x->x(), this);
  1.1069 +  LIRItem yitem(x->y(), this);
  1.1070 +  LIRItem* xin = &xitem;
  1.1071 +  LIRItem* yin = &yitem;
  1.1072 +  If::Condition cond = x->cond();
  1.1073 +
  1.1074 +  if (tag == longTag) {
  1.1075 +    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
  1.1076 +    // mirror for other conditions
  1.1077 +    if (cond == If::gtr || cond == If::leq) {
  1.1078 +      // swap inputs
  1.1079 +      cond = Instruction::mirror(cond);
  1.1080 +      xin = &yitem;
  1.1081 +      yin = &xitem;
  1.1082 +    }
  1.1083 +    xin->set_destroys_register();
  1.1084 +  }
  1.1085 +
  1.1086 +  LIR_Opr left = LIR_OprFact::illegalOpr;
  1.1087 +  LIR_Opr right = LIR_OprFact::illegalOpr;
  1.1088 +
  1.1089 +  xin->load_item();
  1.1090 +  left = xin->result();
  1.1091 +
  1.1092 +  if (is_simm13(yin->result())) {
  1.1093 +    // inline int constants which are small enough to be immediate operands
  1.1094 +    right = LIR_OprFact::value_type(yin->value()->type());
  1.1095 +  } else if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 &&
  1.1096 +             (cond == If::eql || cond == If::neq)) {
  1.1097 +    // inline long zero
  1.1098 +    right = LIR_OprFact::value_type(yin->value()->type());
  1.1099 +  } else if (tag == objectTag && yin->is_constant() && (yin->get_jobject_constant()->is_null_object())) {
  1.1100 +    right = LIR_OprFact::value_type(yin->value()->type());
  1.1101 +  } else {
  1.1102 +    yin->load_item();
  1.1103 +    right = yin->result();
  1.1104 +  }
  1.1105 +  set_no_result(x);
  1.1106 +
  1.1107 +  // add safepoint before generating condition code so it can be recomputed
  1.1108 +  if (x->is_safepoint()) {
  1.1109 +    // increment backedge counter if needed
  1.1110 +    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
  1.1111 +    __ safepoint(new_register(T_INT), state_for(x, x->state_before()));
  1.1112 +  }
  1.1113 +
  1.1114 +  __ cmp(lir_cond(cond), left, right);
  1.1115 +  // Generate branch profiling. Profiling code doesn't kill flags.
  1.1116 +  profile_branch(x, cond);
  1.1117 +  move_to_phi(x->state());
  1.1118 +  if (x->x()->type()->is_float_kind()) {
  1.1119 +    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  1.1120 +  } else {
  1.1121 +    __ branch(lir_cond(cond), right->type(), x->tsux());
  1.1122 +  }
  1.1123 +  assert(x->default_sux() == x->fsux(), "wrong destination above");
  1.1124 +  __ jump(x->default_sux());
  1.1125 +}
  1.1126 +
  1.1127 +
  1.1128 +LIR_Opr LIRGenerator::getThreadPointer() {
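          +  // G2 is the dedicated current-thread register (G2_thread) on SPARC.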
  1.1129 +  return FrameMap::as_pointer_opr(G2);
  1.1130 +}
  1.1131 +
  1.1132 +
  1.1133 +void LIRGenerator::trace_block_entry(BlockBegin* block) {
  1.1134 +  __ move(LIR_OprFact::intConst(block->block_id()), FrameMap::O0_opr);
  1.1135 +  LIR_OprList* args = new LIR_OprList(1);
  1.1136 +  args->append(FrameMap::O0_opr);
  1.1137 +  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  1.1138 +  __ call_runtime_leaf(func, rlock_callee_saved(T_INT), LIR_OprFact::illegalOpr, args);
  1.1139 +}
  1.1140 +
  1.1141 +
  1.1142 +void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
  1.1143 +                                        CodeEmitInfo* info) {
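          +  // On 64-bit SPARC a plain 8-byte store is a single, atomic instruction;
          +  // on 32-bit SPARC volatile stores go through volatile_store_mem_reg, which
          +  // in particular keeps 64-bit (long/double) accesses atomic.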
  1.1144 +#ifdef _LP64
  1.1145 +  __ store(value, address, info);
  1.1146 +#else
  1.1147 +  __ volatile_store_mem_reg(value, address, info);
  1.1148 +#endif
  1.1149 +}
  1.1150 +
  1.1151 +void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
  1.1152 +                                       CodeEmitInfo* info) {
  1.1153 +#ifdef _LP64
  1.1154 +  __ load(address, result, info);
  1.1155 +#else
  1.1156 +  __ volatile_load_mem_reg(address, result, info);
  1.1157 +#endif
  1.1158 +}
  1.1159 +
  1.1160 +
  1.1161 +void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
  1.1162 +                                     BasicType type, bool is_volatile) {
  1.1163 +  LIR_Opr base_op = src;
  1.1164 +  LIR_Opr index_op = offset;
  1.1165 +
  1.1166 +  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  1.1167 +#ifndef _LP64
  1.1168 +  if (is_volatile && type == T_LONG) {
  1.1169 +    __ volatile_store_unsafe_reg(data, src, offset, type, NULL, lir_patch_none);
  1.1170 +  } else
  1.1171 +#endif
  1.1172 +    {
  1.1173 +      if (type == T_BOOLEAN) {
  1.1174 +        type = T_BYTE;
  1.1175 +      }
  1.1176 +      LIR_Address* addr;
  1.1177 +      if (type == T_ARRAY || type == T_OBJECT) {
  1.1178 +        LIR_Opr tmp = new_pointer_register();
  1.1179 +        __ add(base_op, index_op, tmp);
  1.1180 +        addr = new LIR_Address(tmp, type);
  1.1181 +      } else {
  1.1182 +        addr = new LIR_Address(base_op, index_op, type);
  1.1183 +      }
  1.1184 +
  1.1185 +      if (is_obj) {
  1.1186 +        pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
  1.1187 +                    true /* do_load */, false /* patch */, NULL);
  1.1188 +        // _bs->c1_write_barrier_pre(this, LIR_OprFact::address(addr));
  1.1189 +      }
  1.1190 +      __ move(data, addr);
  1.1191 +      if (is_obj) {
  1.1192 +        // This address is precise
  1.1193 +        post_barrier(LIR_OprFact::address(addr), data);
  1.1194 +      }
  1.1195 +    }
  1.1196 +}
  1.1197 +
  1.1198 +
  1.1199 +void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
  1.1200 +                                     BasicType type, bool is_volatile) {
  1.1201 +#ifndef _LP64
  1.1202 +  if (is_volatile && type == T_LONG) {
  1.1203 +    __ volatile_load_unsafe_reg(src, offset, dst, type, NULL, lir_patch_none);
  1.1204 +  } else
  1.1205 +#endif
  1.1206 +    {
  1.1207 +    LIR_Address* addr = new LIR_Address(src, offset, type);
  1.1208 +    __ load(addr, dst);
  1.1209 +  }
  1.1210 +}
  1.1211 +
  1.1212 +void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  1.1213 +  BasicType type = x->basic_type();
  1.1214 +  LIRItem src(x->object(), this);
  1.1215 +  LIRItem off(x->offset(), this);
  1.1216 +  LIRItem value(x->value(), this);
  1.1217 +
  1.1218 +  src.load_item();
  1.1219 +  value.load_item();
  1.1220 +  off.load_nonconstant();
  1.1221 +
  1.1222 +  LIR_Opr dst = rlock_result(x, type);
  1.1223 +  LIR_Opr data = value.result();
  1.1224 +  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  1.1225 +  LIR_Opr offset = off.result();
  1.1226 +
  1.1227 +  // Because we want a 2-arg form of xchg
  1.1228 +  __ move(data, dst);
  1.1229 +
  1.1230 +  assert (!x->is_add() && (type == T_INT || (is_obj LP64_ONLY(&& UseCompressedOops))), "unexpected type");
  1.1231 +  LIR_Address* addr;
  1.1232 +  if (offset->is_constant()) {
  1.1233 +
  1.1234 +#ifdef _LP64
  1.1235 +    jlong l = offset->as_jlong();
  1.1236 +    assert((jlong)((jint)l) == l, "offset too large for constant");
  1.1237 +    jint c = (jint)l;
  1.1238 +#else
  1.1239 +    jint c = offset->as_jint();
  1.1240 +#endif
  1.1241 +    addr = new LIR_Address(src.result(), c, type);
  1.1242 +  } else {
  1.1243 +    addr = new LIR_Address(src.result(), offset, type);
  1.1244 +  }
  1.1245 +
  1.1246 +  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  1.1247 +  LIR_Opr ptr = LIR_OprFact::illegalOpr;
  1.1248 +
  1.1249 +  if (is_obj) {
  1.1250 +    // Do the pre-write barrier, if any.
  1.1251 +    // barriers on sparc don't work with a base + index address
  1.1252 +    tmp = FrameMap::G3_opr;
  1.1253 +    ptr = new_pointer_register();
  1.1254 +    __ add(src.result(), off.result(), ptr);
  1.1255 +    pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
  1.1256 +                true /* do_load */, false /* patch */, NULL);
  1.1257 +  }
  1.1258 +  __ xchg(LIR_OprFact::address(addr), dst, dst, tmp);
  1.1259 +  if (is_obj) {
  1.1260 +    // Seems to be a precise address
  1.1261 +    post_barrier(ptr, data);
  1.1262 +  }
  1.1263 +}
