src/share/vm/c1/c1_LIR.hpp

changeset 435:a61af66fc99e
child 739:dc7f315e41f7
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/c1/c1_LIR.hpp	Sat Dec 01 00:00:00 2007 +0000
     1.3 @@ -0,0 +1,2034 @@
     1.4 +/*
     1.5 + * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    1.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
    1.24 + * have any questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +class BlockBegin;
    1.29 +class BlockList;
    1.30 +class LIR_Assembler;
    1.31 +class CodeEmitInfo;
    1.32 +class CodeStub;
    1.33 +class CodeStubList;
    1.34 +class ArrayCopyStub;
    1.35 +class LIR_Op;
    1.36 +class ciType;
    1.37 +class ValueType;
    1.38 +class LIR_OpVisitState;
    1.39 +class FpuStackSim;
    1.40 +
    1.41 +//---------------------------------------------------------------------
    1.42 +//                 LIR Operands
    1.43 +//  LIR_OprDesc
    1.44 +//    LIR_OprPtr
    1.45 +//      LIR_Const
    1.46 +//      LIR_Address
    1.47 +//---------------------------------------------------------------------
    1.48 +class LIR_OprDesc;
    1.49 +class LIR_OprPtr;
    1.50 +class LIR_Const;
    1.51 +class LIR_Address;
    1.52 +class LIR_OprVisitor;
    1.53 +
    1.54 +
    1.55 +typedef LIR_OprDesc* LIR_Opr;
    1.56 +typedef int          RegNr;
    1.57 +
    1.58 +define_array(LIR_OprArray, LIR_Opr)
    1.59 +define_stack(LIR_OprList, LIR_OprArray)
    1.60 +
    1.61 +define_array(LIR_OprRefArray, LIR_Opr*)
    1.62 +define_stack(LIR_OprRefList, LIR_OprRefArray)
    1.63 +
    1.64 +define_array(CodeEmitInfoArray, CodeEmitInfo*)
    1.65 +define_stack(CodeEmitInfoList, CodeEmitInfoArray)
    1.66 +
    1.67 +define_array(LIR_OpArray, LIR_Op*)
    1.68 +define_stack(LIR_OpList, LIR_OpArray)
    1.69 +
    1.70 +// define LIR_OprPtr early so LIR_OprDesc can refer to it
    1.71 +class LIR_OprPtr: public CompilationResourceObj {
    1.72 + public:
    1.73 +  bool is_oop_pointer() const                    { return (type() == T_OBJECT); }
    1.74 +  bool is_float_kind() const                     { BasicType t = type(); return (t == T_FLOAT) || (t == T_DOUBLE); }
    1.75 +
    1.76 +  virtual LIR_Const*  as_constant()              { return NULL; }
    1.77 +  virtual LIR_Address* as_address()              { return NULL; }
    1.78 +  virtual BasicType type() const                 = 0;
    1.79 +  virtual void print_value_on(outputStream* out) const = 0;
    1.80 +};
    1.81 +
    1.82 +
    1.83 +
    1.84 +// LIR constants
    1.85 +class LIR_Const: public LIR_OprPtr {
    1.86 + private:
    1.87 +  JavaValue _value;
    1.88 +
    1.89 +  void type_check(BasicType t) const   { assert(type() == t, "type check"); }
    1.90 +  void type_check(BasicType t1, BasicType t2) const   { assert(type() == t1 || type() == t2, "type check"); }
    1.91 +
    1.92 + public:
    1.93 +  LIR_Const(jint i)                              { _value.set_type(T_INT);     _value.set_jint(i); }
    1.94 +  LIR_Const(jlong l)                             { _value.set_type(T_LONG);    _value.set_jlong(l); }
    1.95 +  LIR_Const(jfloat f)                            { _value.set_type(T_FLOAT);   _value.set_jfloat(f); }
    1.96 +  LIR_Const(jdouble d)                           { _value.set_type(T_DOUBLE);  _value.set_jdouble(d); }
    1.97 +  LIR_Const(jobject o)                           { _value.set_type(T_OBJECT);  _value.set_jobject(o); }
    1.98 +  LIR_Const(void* p) {
    1.99 +#ifdef _LP64
    1.100 +    assert(sizeof(jlong) >= sizeof(p), "too small");
   1.101 +    _value.set_type(T_LONG);    _value.set_jlong((jlong)p);
   1.102 +#else
    1.103 +    assert(sizeof(jint) >= sizeof(p), "too small");
   1.104 +    _value.set_type(T_INT);     _value.set_jint((jint)p);
   1.105 +#endif
   1.106 +  }
   1.107 +
   1.108 +  virtual BasicType type()       const { return _value.get_type(); }
   1.109 +  virtual LIR_Const* as_constant()     { return this; }
   1.110 +
   1.111 +  jint      as_jint()    const         { type_check(T_INT   ); return _value.get_jint(); }
   1.112 +  jlong     as_jlong()   const         { type_check(T_LONG  ); return _value.get_jlong(); }
   1.113 +  jfloat    as_jfloat()  const         { type_check(T_FLOAT ); return _value.get_jfloat(); }
   1.114 +  jdouble   as_jdouble() const         { type_check(T_DOUBLE); return _value.get_jdouble(); }
   1.115 +  jobject   as_jobject() const         { type_check(T_OBJECT); return _value.get_jobject(); }
   1.116 +  jint      as_jint_lo() const         { type_check(T_LONG  ); return low(_value.get_jlong()); }
   1.117 +  jint      as_jint_hi() const         { type_check(T_LONG  ); return high(_value.get_jlong()); }
   1.118 +
   1.119 +#ifdef _LP64
   1.120 +  address   as_pointer() const         { type_check(T_LONG  ); return (address)_value.get_jlong(); }
   1.121 +#else
   1.122 +  address   as_pointer() const         { type_check(T_INT   ); return (address)_value.get_jint(); }
   1.123 +#endif
   1.124 +
   1.125 +
   1.126 +  jint      as_jint_bits() const       { type_check(T_FLOAT, T_INT); return _value.get_jint(); }
   1.127 +  jint      as_jint_lo_bits() const    {
   1.128 +    if (type() == T_DOUBLE) {
   1.129 +      return low(jlong_cast(_value.get_jdouble()));
   1.130 +    } else {
   1.131 +      return as_jint_lo();
   1.132 +    }
   1.133 +  }
   1.134 +  jint      as_jint_hi_bits() const    {
   1.135 +    if (type() == T_DOUBLE) {
   1.136 +      return high(jlong_cast(_value.get_jdouble()));
   1.137 +    } else {
   1.138 +      return as_jint_hi();
   1.139 +    }
   1.140 +  }
   1.141 +
   1.142 +  virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
   1.143 +
   1.144 +
   1.145 +  bool is_zero_float() {
   1.146 +    jfloat f = as_jfloat();
   1.147 +    jfloat ok = 0.0f;
   1.148 +    return jint_cast(f) == jint_cast(ok);
   1.149 +  }
   1.150 +
   1.151 +  bool is_one_float() {
   1.152 +    jfloat f = as_jfloat();
   1.153 +    return !g_isnan(f) && g_isfinite(f) && f == 1.0;
   1.154 +  }
   1.155 +
   1.156 +  bool is_zero_double() {
   1.157 +    jdouble d = as_jdouble();
   1.158 +    jdouble ok = 0.0;
   1.159 +    return jlong_cast(d) == jlong_cast(ok);
   1.160 +  }
   1.161 +
   1.162 +  bool is_one_double() {
   1.163 +    jdouble d = as_jdouble();
   1.164 +    return !g_isnan(d) && g_isfinite(d) && d == 1.0;
   1.165 +  }
   1.166 +};
   1.167 +
   1.168 +
   1.169 +//---------------------LIR Operand descriptor------------------------------------
   1.170 +//
    1.171 +// The class LIR_OprDesc represents a LIR instruction operand;
    1.172 +// it can be a register (ALU/FPU), a stack location or a constant.
    1.173 +// Constants and addresses are represented as resource-area allocated
    1.174 +// structures (see above).
    1.175 +// Registers and stack locations are encoded directly in the pointer value
    1.176 +// itself (see the value() function).
   1.177 +
   1.178 +class LIR_OprDesc: public CompilationResourceObj {
   1.179 + public:
   1.180 +  // value structure:
   1.181 +  //     data       opr-type opr-kind
   1.182 +  // +--------------+-------+-------+
   1.183 +  // [max...........|7 6 5 4|3 2 1 0]
   1.184 +  //                             ^
   1.185 +  //                    is_pointer bit
   1.186 +  //
    1.187 +  // lowest bit cleared means it is a structure pointer
    1.188 +  // we need 4 bits to represent types
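  //
  // A worked example (editor's sketch derived from the OprBits/OprShift enums
  // below, not part of the original header): with kind_bits = 3, type_bits = 4,
  // size_bits = 2 and five single-purpose flag bits, data_shift works out to 14,
  // so
  //
  //   LIR_OprFact::single_cpu(5)
  //     == (LIR_Opr)((5 << 14) | int_type | cpu_register | single_size)
  //     == (LIR_Opr)0x1400b
  //
  // The low bit is set (cpu_register == 3), so is_pointer() is false and the
  // register number is recovered as data() == value() >> data_shift == 5.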
   1.189 +
   1.190 + private:
   1.191 +  friend class LIR_OprFact;
   1.192 +
   1.193 +  // Conversion
   1.194 +  intptr_t value() const                         { return (intptr_t) this; }
   1.195 +
   1.196 +  bool check_value_mask(intptr_t mask, intptr_t masked_value) const {
   1.197 +    return (value() & mask) == masked_value;
   1.198 +  }
   1.199 +
   1.200 +  enum OprKind {
   1.201 +      pointer_value      = 0
   1.202 +    , stack_value        = 1
   1.203 +    , cpu_register       = 3
   1.204 +    , fpu_register       = 5
   1.205 +    , illegal_value      = 7
   1.206 +  };
   1.207 +
   1.208 +  enum OprBits {
   1.209 +      pointer_bits   = 1
   1.210 +    , kind_bits      = 3
   1.211 +    , type_bits      = 4
   1.212 +    , size_bits      = 2
   1.213 +    , destroys_bits  = 1
   1.214 +    , virtual_bits   = 1
   1.215 +    , is_xmm_bits    = 1
   1.216 +    , last_use_bits  = 1
   1.217 +    , is_fpu_stack_offset_bits = 1        // used in assertion checking on x86 for FPU stack slot allocation
   1.218 +    , non_data_bits  = kind_bits + type_bits + size_bits + destroys_bits + last_use_bits +
   1.219 +                       is_fpu_stack_offset_bits + virtual_bits + is_xmm_bits
   1.220 +    , data_bits      = BitsPerInt - non_data_bits
   1.221 +    , reg_bits       = data_bits / 2      // for two registers in one value encoding
   1.222 +  };
   1.223 +
   1.224 +  enum OprShift {
   1.225 +      kind_shift     = 0
   1.226 +    , type_shift     = kind_shift     + kind_bits
   1.227 +    , size_shift     = type_shift     + type_bits
   1.228 +    , destroys_shift = size_shift     + size_bits
   1.229 +    , last_use_shift = destroys_shift + destroys_bits
   1.230 +    , is_fpu_stack_offset_shift = last_use_shift + last_use_bits
   1.231 +    , virtual_shift  = is_fpu_stack_offset_shift + is_fpu_stack_offset_bits
   1.232 +    , is_xmm_shift   = virtual_shift + virtual_bits
   1.233 +    , data_shift     = is_xmm_shift + is_xmm_bits
   1.234 +    , reg1_shift = data_shift
   1.235 +    , reg2_shift = data_shift + reg_bits
   1.236 +
   1.237 +  };
   1.238 +
   1.239 +  enum OprSize {
   1.240 +      single_size = 0 << size_shift
   1.241 +    , double_size = 1 << size_shift
   1.242 +  };
   1.243 +
   1.244 +  enum OprMask {
   1.245 +      kind_mask      = right_n_bits(kind_bits)
   1.246 +    , type_mask      = right_n_bits(type_bits) << type_shift
   1.247 +    , size_mask      = right_n_bits(size_bits) << size_shift
   1.248 +    , last_use_mask  = right_n_bits(last_use_bits) << last_use_shift
   1.249 +    , is_fpu_stack_offset_mask = right_n_bits(is_fpu_stack_offset_bits) << is_fpu_stack_offset_shift
   1.250 +    , virtual_mask   = right_n_bits(virtual_bits) << virtual_shift
   1.251 +    , is_xmm_mask    = right_n_bits(is_xmm_bits) << is_xmm_shift
   1.252 +    , pointer_mask   = right_n_bits(pointer_bits)
   1.253 +    , lower_reg_mask = right_n_bits(reg_bits)
   1.254 +    , no_type_mask   = (int)(~(type_mask | last_use_mask | is_fpu_stack_offset_mask))
   1.255 +  };
   1.256 +
   1.257 +  uintptr_t data() const                         { return value() >> data_shift; }
   1.258 +  int lo_reg_half() const                        { return data() & lower_reg_mask; }
   1.259 +  int hi_reg_half() const                        { return (data() >> reg_bits) & lower_reg_mask; }
   1.260 +  OprKind kind_field() const                     { return (OprKind)(value() & kind_mask); }
   1.261 +  OprSize size_field() const                     { return (OprSize)(value() & size_mask); }
   1.262 +
   1.263 +  static char type_char(BasicType t);
   1.264 +
   1.265 + public:
   1.266 +  enum {
   1.267 +    vreg_base = ConcreteRegisterImpl::number_of_registers,
   1.268 +    vreg_max = (1 << data_bits) - 1
   1.269 +  };
   1.270 +
   1.271 +  static inline LIR_Opr illegalOpr();
   1.272 +
   1.273 +  enum OprType {
   1.274 +      unknown_type  = 0 << type_shift    // means: not set (catch uninitialized types)
   1.275 +    , int_type      = 1 << type_shift
   1.276 +    , long_type     = 2 << type_shift
   1.277 +    , object_type   = 3 << type_shift
   1.278 +    , pointer_type  = 4 << type_shift
   1.279 +    , float_type    = 5 << type_shift
   1.280 +    , double_type   = 6 << type_shift
   1.281 +  };
   1.282 +  friend OprType as_OprType(BasicType t);
   1.283 +  friend BasicType as_BasicType(OprType t);
   1.284 +
   1.285 +  OprType type_field_valid() const               { assert(is_register() || is_stack(), "should not be called otherwise"); return (OprType)(value() & type_mask); }
   1.286 +  OprType type_field() const                     { return is_illegal() ? unknown_type : (OprType)(value() & type_mask); }
   1.287 +
   1.288 +  static OprSize size_for(BasicType t) {
   1.289 +    switch (t) {
   1.290 +      case T_LONG:
   1.291 +      case T_DOUBLE:
   1.292 +        return double_size;
   1.293 +        break;
   1.294 +
   1.295 +      case T_FLOAT:
   1.296 +      case T_BOOLEAN:
   1.297 +      case T_CHAR:
   1.298 +      case T_BYTE:
   1.299 +      case T_SHORT:
   1.300 +      case T_INT:
   1.301 +      case T_OBJECT:
   1.302 +      case T_ARRAY:
   1.303 +        return single_size;
   1.304 +        break;
   1.305 +
   1.306 +      default:
   1.307 +        ShouldNotReachHere();
   1.308 +      }
   1.309 +  }
   1.310 +
   1.311 +
   1.312 +  void validate_type() const PRODUCT_RETURN;
   1.313 +
   1.314 +  BasicType type() const {
   1.315 +    if (is_pointer()) {
   1.316 +      return pointer()->type();
   1.317 +    }
   1.318 +    return as_BasicType(type_field());
   1.319 +  }
   1.320 +
   1.321 +
   1.322 +  ValueType* value_type() const                  { return as_ValueType(type()); }
   1.323 +
   1.324 +  char type_char() const                         { return type_char((is_pointer()) ? pointer()->type() : type()); }
   1.325 +
   1.326 +  bool is_equal(LIR_Opr opr) const         { return this == opr; }
    1.327 +  // checks whether the types are the same
   1.328 +  bool is_same_type(LIR_Opr opr) const     {
   1.329 +    assert(type_field() != unknown_type &&
   1.330 +           opr->type_field() != unknown_type, "shouldn't see unknown_type");
   1.331 +    return type_field() == opr->type_field();
   1.332 +  }
   1.333 +  bool is_same_register(LIR_Opr opr) {
   1.334 +    return (is_register() && opr->is_register() &&
   1.335 +            kind_field() == opr->kind_field() &&
   1.336 +            (value() & no_type_mask) == (opr->value() & no_type_mask));
   1.337 +  }
   1.338 +
   1.339 +  bool is_pointer() const      { return check_value_mask(pointer_mask, pointer_value); }
   1.340 +  bool is_illegal() const      { return kind_field() == illegal_value; }
   1.341 +  bool is_valid() const        { return kind_field() != illegal_value; }
   1.342 +
   1.343 +  bool is_register() const     { return is_cpu_register() || is_fpu_register(); }
   1.344 +  bool is_virtual() const      { return is_virtual_cpu()  || is_virtual_fpu();  }
   1.345 +
   1.346 +  bool is_constant() const     { return is_pointer() && pointer()->as_constant() != NULL; }
   1.347 +  bool is_address() const      { return is_pointer() && pointer()->as_address() != NULL; }
   1.348 +
   1.349 +  bool is_float_kind() const   { return is_pointer() ? pointer()->is_float_kind() : (kind_field() == fpu_register); }
   1.350 +  bool is_oop() const;
   1.351 +
    1.352 +  // semantics for fpu- and xmm-registers:
    1.353 +  // * the float and double predicates also return true for xmm registers
    1.354 +  //   (i.e. is_single_fpu and is_single_xmm are both true for a single xmm operand)
    1.355 +  // * you must therefore always check for is_???_xmm before is_???_fpu to
    1.356 +  //   distinguish between fpu- and xmm-registers
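  //
  // For instance (editor's sketch, not in the original header), code that
  // dispatches on the register file must test the xmm variant first, because a
  // single xmm operand also answers true to is_single_fpu():
  //
  //   if      (opr->is_single_xmm()) { /* SSE register   */ }
  //   else if (opr->is_single_fpu()) { /* x87 stack slot */ }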
   1.357 +
   1.358 +  bool is_stack() const        { validate_type(); return check_value_mask(kind_mask,                stack_value);                 }
   1.359 +  bool is_single_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | single_size);  }
   1.360 +  bool is_double_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | double_size);  }
   1.361 +
   1.362 +  bool is_cpu_register() const { validate_type(); return check_value_mask(kind_mask,                cpu_register);                }
   1.363 +  bool is_virtual_cpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register | virtual_mask); }
   1.364 +  bool is_fixed_cpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register);                }
   1.365 +  bool is_single_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | single_size);  }
   1.366 +  bool is_double_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | double_size);  }
   1.367 +
   1.368 +  bool is_fpu_register() const { validate_type(); return check_value_mask(kind_mask,                fpu_register);                }
   1.369 +  bool is_virtual_fpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register | virtual_mask); }
   1.370 +  bool is_fixed_fpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register);                }
   1.371 +  bool is_single_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | single_size);  }
   1.372 +  bool is_double_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | double_size);  }
   1.373 +
   1.374 +  bool is_xmm_register() const { validate_type(); return check_value_mask(kind_mask | is_xmm_mask,             fpu_register | is_xmm_mask); }
   1.375 +  bool is_single_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | single_size | is_xmm_mask); }
   1.376 +  bool is_double_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | double_size | is_xmm_mask); }
   1.377 +
   1.378 +  // fast accessor functions for special bits that do not work for pointers
    1.379 +  // (in these functions, the check for is_pointer() is omitted)
   1.380 +  bool is_single_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, single_size); }
   1.381 +  bool is_double_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, double_size); }
   1.382 +  bool is_virtual_register() const { assert(is_register(),               "type check"); return check_value_mask(virtual_mask, virtual_mask); }
   1.383 +  bool is_oop_register() const     { assert(is_register() || is_stack(), "type check"); return type_field_valid() == object_type; }
   1.384 +  BasicType type_register() const  { assert(is_register() || is_stack(), "type check"); return as_BasicType(type_field_valid());  }
   1.385 +
   1.386 +  bool is_last_use() const         { assert(is_register(), "only works for registers"); return (value() & last_use_mask) != 0; }
   1.387 +  bool is_fpu_stack_offset() const { assert(is_register(), "only works for registers"); return (value() & is_fpu_stack_offset_mask) != 0; }
   1.388 +  LIR_Opr make_last_use()          { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | last_use_mask); }
   1.389 +  LIR_Opr make_fpu_stack_offset()  { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | is_fpu_stack_offset_mask); }
   1.390 +
   1.391 +
   1.392 +  int single_stack_ix() const  { assert(is_single_stack() && !is_virtual(), "type check"); return (int)data(); }
   1.393 +  int double_stack_ix() const  { assert(is_double_stack() && !is_virtual(), "type check"); return (int)data(); }
   1.394 +  RegNr cpu_regnr() const      { assert(is_single_cpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
   1.395 +  RegNr cpu_regnrLo() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
   1.396 +  RegNr cpu_regnrHi() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
   1.397 +  RegNr fpu_regnr() const      { assert(is_single_fpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
   1.398 +  RegNr fpu_regnrLo() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
   1.399 +  RegNr fpu_regnrHi() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
   1.400 +  RegNr xmm_regnr() const      { assert(is_single_xmm()   && !is_virtual(), "type check"); return (RegNr)data(); }
   1.401 +  RegNr xmm_regnrLo() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
   1.402 +  RegNr xmm_regnrHi() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
   1.403 +  int   vreg_number() const    { assert(is_virtual(),                       "type check"); return (RegNr)data(); }
   1.404 +
   1.405 +  LIR_OprPtr* pointer()  const                   { assert(is_pointer(), "type check");      return (LIR_OprPtr*)this; }
   1.406 +  LIR_Const* as_constant_ptr() const             { return pointer()->as_constant(); }
   1.407 +  LIR_Address* as_address_ptr() const            { return pointer()->as_address(); }
   1.408 +
   1.409 +  Register as_register()    const;
   1.410 +  Register as_register_lo() const;
   1.411 +  Register as_register_hi() const;
   1.412 +
   1.413 +  Register as_pointer_register() {
   1.414 +#ifdef _LP64
   1.415 +    if (is_double_cpu()) {
   1.416 +      assert(as_register_lo() == as_register_hi(), "should be a single register");
   1.417 +      return as_register_lo();
   1.418 +    }
   1.419 +#endif
   1.420 +    return as_register();
   1.421 +  }
   1.422 +
   1.423 +#ifdef IA32
   1.424 +  XMMRegister as_xmm_float_reg() const;
   1.425 +  XMMRegister as_xmm_double_reg() const;
   1.426 +  // for compatibility with RInfo
   1.427 +  int fpu () const                                  { return lo_reg_half(); }
   1.428 +#endif
   1.429 +
   1.430 +#ifdef SPARC
   1.431 +  FloatRegister as_float_reg   () const;
   1.432 +  FloatRegister as_double_reg  () const;
   1.433 +#endif
   1.434 +
   1.435 +  jint      as_jint()    const { return as_constant_ptr()->as_jint(); }
   1.436 +  jlong     as_jlong()   const { return as_constant_ptr()->as_jlong(); }
   1.437 +  jfloat    as_jfloat()  const { return as_constant_ptr()->as_jfloat(); }
   1.438 +  jdouble   as_jdouble() const { return as_constant_ptr()->as_jdouble(); }
   1.439 +  jobject   as_jobject() const { return as_constant_ptr()->as_jobject(); }
   1.440 +
   1.441 +  void print() const PRODUCT_RETURN;
   1.442 +  void print(outputStream* out) const PRODUCT_RETURN;
   1.443 +};
   1.444 +
   1.445 +
   1.446 +inline LIR_OprDesc::OprType as_OprType(BasicType type) {
   1.447 +  switch (type) {
   1.448 +  case T_INT:      return LIR_OprDesc::int_type;
   1.449 +  case T_LONG:     return LIR_OprDesc::long_type;
   1.450 +  case T_FLOAT:    return LIR_OprDesc::float_type;
   1.451 +  case T_DOUBLE:   return LIR_OprDesc::double_type;
   1.452 +  case T_OBJECT:
   1.453 +  case T_ARRAY:    return LIR_OprDesc::object_type;
   1.454 +  case T_ILLEGAL:  // fall through
   1.455 +  default: ShouldNotReachHere(); return LIR_OprDesc::unknown_type;
   1.456 +  }
   1.457 +}
   1.458 +
   1.459 +inline BasicType as_BasicType(LIR_OprDesc::OprType t) {
   1.460 +  switch (t) {
   1.461 +  case LIR_OprDesc::int_type:     return T_INT;
   1.462 +  case LIR_OprDesc::long_type:    return T_LONG;
   1.463 +  case LIR_OprDesc::float_type:   return T_FLOAT;
   1.464 +  case LIR_OprDesc::double_type:  return T_DOUBLE;
   1.465 +  case LIR_OprDesc::object_type:  return T_OBJECT;
   1.466 +  case LIR_OprDesc::unknown_type: // fall through
   1.467 +  default: ShouldNotReachHere();  return T_ILLEGAL;
   1.468 +  }
   1.469 +}
   1.470 +
   1.471 +
   1.472 +// LIR_Address
   1.473 +class LIR_Address: public LIR_OprPtr {
   1.474 + friend class LIR_OpVisitState;
   1.475 +
   1.476 + public:
   1.477 +  // NOTE: currently these must be the log2 of the scale factor (and
   1.478 +  // must also be equivalent to the ScaleFactor enum in
   1.479 +  // assembler_i486.hpp)
   1.480 +  enum Scale {
   1.481 +    times_1  =  0,
   1.482 +    times_2  =  1,
   1.483 +    times_4  =  2,
   1.484 +    times_8  =  3
   1.485 +  };
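  // For example, a T_INT element occupies 4 == (1 << times_4) bytes, so
  // scale(T_INT) (declared below) presumably returns times_4; this is an
  // editor's illustration of the log2 relationship, not original text.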
   1.486 +
   1.487 + private:
   1.488 +  LIR_Opr   _base;
   1.489 +  LIR_Opr   _index;
   1.490 +  Scale     _scale;
   1.491 +  intx      _disp;
   1.492 +  BasicType _type;
   1.493 +
   1.494 + public:
   1.495 +  LIR_Address(LIR_Opr base, LIR_Opr index, BasicType type):
   1.496 +       _base(base)
   1.497 +     , _index(index)
   1.498 +     , _scale(times_1)
   1.499 +     , _type(type)
   1.500 +     , _disp(0) { verify(); }
   1.501 +
   1.502 +  LIR_Address(LIR_Opr base, int disp, BasicType type):
   1.503 +       _base(base)
   1.504 +     , _index(LIR_OprDesc::illegalOpr())
   1.505 +     , _scale(times_1)
   1.506 +     , _type(type)
   1.507 +     , _disp(disp) { verify(); }
   1.508 +
   1.509 +#ifdef IA32
   1.510 +  LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, int disp, BasicType type):
   1.511 +       _base(base)
   1.512 +     , _index(index)
   1.513 +     , _scale(scale)
   1.514 +     , _type(type)
   1.515 +     , _disp(disp) { verify(); }
   1.516 +#endif
   1.517 +
   1.518 +  LIR_Opr base()  const                          { return _base;  }
   1.519 +  LIR_Opr index() const                          { return _index; }
   1.520 +  Scale   scale() const                          { return _scale; }
   1.521 +  intx    disp()  const                          { return _disp;  }
   1.522 +
   1.523 +  bool equals(LIR_Address* other) const          { return base() == other->base() && index() == other->index() && disp() == other->disp() && scale() == other->scale(); }
   1.524 +
   1.525 +  virtual LIR_Address* as_address()              { return this;   }
   1.526 +  virtual BasicType type() const                 { return _type; }
   1.527 +  virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
   1.528 +
   1.529 +  void verify() const PRODUCT_RETURN;
   1.530 +
   1.531 +  static Scale scale(BasicType type);
   1.532 +};
   1.533 +
   1.534 +
   1.535 +// operand factory
   1.536 +class LIR_OprFact: public AllStatic {
   1.537 + public:
   1.538 +
   1.539 +  static LIR_Opr illegalOpr;
   1.540 +
   1.541 +  static LIR_Opr single_cpu(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::int_type    | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
   1.542 +  static LIR_Opr single_cpu_oop(int reg)        { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::object_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
   1.543 +  static LIR_Opr double_cpu(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::long_type   | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size); }
   1.544 +
   1.545 +  static LIR_Opr single_fpu(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::float_type  | LIR_OprDesc::fpu_register | LIR_OprDesc::single_size); }
   1.546 +
   1.547 +#ifdef SPARC
   1.548 +  static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); }
   1.549 +#endif
   1.550 +#ifdef IA32
   1.551 +  static LIR_Opr double_fpu(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) | (reg  << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); }
   1.552 +  static LIR_Opr single_xmm(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) |                                     LIR_OprDesc::float_type  | LIR_OprDesc::fpu_register | LIR_OprDesc::single_size | LIR_OprDesc::is_xmm_mask); }
   1.553 +  static LIR_Opr double_xmm(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) | (reg  << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size | LIR_OprDesc::is_xmm_mask); }
   1.554 +#endif
   1.555 +
   1.556 +
   1.557 +  static LIR_Opr virtual_register(int index, BasicType type) {
   1.558 +    LIR_Opr res;
   1.559 +    switch (type) {
   1.560 +      case T_OBJECT: // fall through
   1.561 +      case T_ARRAY:  res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::object_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size | LIR_OprDesc::virtual_mask); break;
   1.562 +      case T_INT:    res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::int_type    | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size | LIR_OprDesc::virtual_mask); break;
   1.563 +      case T_LONG:   res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::long_type   | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size | LIR_OprDesc::virtual_mask); break;
   1.564 +      case T_FLOAT:  res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::float_type  | LIR_OprDesc::fpu_register | LIR_OprDesc::single_size | LIR_OprDesc::virtual_mask); break;
   1.565 +      case T_DOUBLE: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size | LIR_OprDesc::virtual_mask); break;
   1.566 +
   1.567 +      default:       ShouldNotReachHere(); res = illegalOpr;
   1.568 +    }
   1.569 +
   1.570 +#ifdef ASSERT
   1.571 +    res->validate_type();
   1.572 +    assert(res->vreg_number() == index, "conversion check");
   1.573 +    assert(index >= LIR_OprDesc::vreg_base, "must start at vreg_base");
   1.574 +    assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");
   1.575 +
   1.576 +    // old-style calculation; check if old and new method are equal
   1.577 +    LIR_OprDesc::OprType t = as_OprType(type);
   1.578 +    LIR_Opr old_res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | t |
   1.579 +                               ((type == T_FLOAT || type == T_DOUBLE) ? LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) |
   1.580 +                               LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
   1.581 +    assert(res == old_res, "old and new method not equal");
   1.582 +#endif
   1.583 +
   1.584 +    return res;
   1.585 +  }
   1.586 +
    1.587 +  // 'index' is computed by FrameMap::local_stack_pos(index); do not use other parameters, as
    1.588 +  // the index is platform independent; a double-word stack value using indices 2 and 3 always
    1.589 +  // has index 2.
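  //
  // For example (editor's sketch): a long spilled to the stack slot pair (2, 3)
  // is created as LIR_OprFact::stack(2, T_LONG); the resulting operand is a
  // double-word stack value whose double_stack_ix() is 2 on every platform.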
   1.590 +  static LIR_Opr stack(int index, BasicType type) {
   1.591 +    LIR_Opr res;
   1.592 +    switch (type) {
   1.593 +      case T_OBJECT: // fall through
   1.594 +      case T_ARRAY:  res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::object_type | LIR_OprDesc::stack_value | LIR_OprDesc::single_size); break;
   1.595 +      case T_INT:    res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::int_type    | LIR_OprDesc::stack_value | LIR_OprDesc::single_size); break;
   1.596 +      case T_LONG:   res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::long_type   | LIR_OprDesc::stack_value | LIR_OprDesc::double_size); break;
   1.597 +      case T_FLOAT:  res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::float_type  | LIR_OprDesc::stack_value | LIR_OprDesc::single_size); break;
   1.598 +      case T_DOUBLE: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::double_type | LIR_OprDesc::stack_value | LIR_OprDesc::double_size); break;
   1.599 +
   1.600 +      default:       ShouldNotReachHere(); res = illegalOpr;
   1.601 +    }
   1.602 +
   1.603 +#ifdef ASSERT
   1.604 +    assert(index >= 0, "index must be positive");
   1.605 +    assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");
   1.606 +
   1.607 +    LIR_Opr old_res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::stack_value | as_OprType(type) | LIR_OprDesc::size_for(type));
   1.608 +    assert(res == old_res, "old and new method not equal");
   1.609 +#endif
   1.610 +
   1.611 +    return res;
   1.612 +  }
   1.613 +
   1.614 +  static LIR_Opr intConst(jint i)                { return (LIR_Opr)(new LIR_Const(i)); }
   1.615 +  static LIR_Opr longConst(jlong l)              { return (LIR_Opr)(new LIR_Const(l)); }
   1.616 +  static LIR_Opr floatConst(jfloat f)            { return (LIR_Opr)(new LIR_Const(f)); }
   1.617 +  static LIR_Opr doubleConst(jdouble d)          { return (LIR_Opr)(new LIR_Const(d)); }
   1.618 +  static LIR_Opr oopConst(jobject o)             { return (LIR_Opr)(new LIR_Const(o)); }
   1.619 +  static LIR_Opr address(LIR_Address* a)         { return (LIR_Opr)a; }
   1.620 +  static LIR_Opr intptrConst(void* p)            { return (LIR_Opr)(new LIR_Const(p)); }
   1.621 +  static LIR_Opr intptrConst(intptr_t v)         { return (LIR_Opr)(new LIR_Const((void*)v)); }
   1.622 +  static LIR_Opr illegal()                       { return (LIR_Opr)-1; }
   1.623 +
   1.624 +  static LIR_Opr value_type(ValueType* type);
   1.625 +  static LIR_Opr dummy_value_type(ValueType* type);
   1.626 +};
   1.627 +
   1.628 +
   1.629 +//-------------------------------------------------------------------------------
   1.630 +//                   LIR Instructions
   1.631 +//-------------------------------------------------------------------------------
   1.632 +//
   1.633 +// Note:
   1.634 +//  - every instruction has a result operand
    1.635 +//  - every instruction has a CodeEmitInfo operand (can be revisited later)
   1.636 +//  - every instruction has a LIR_OpCode operand
    1.637 +//  - LIR_OpN means an instruction that has N input operands
   1.638 +//
   1.639 +// class hierarchy:
   1.640 +//
   1.641 +class  LIR_Op;
   1.642 +class    LIR_Op0;
   1.643 +class      LIR_OpLabel;
   1.644 +class    LIR_Op1;
   1.645 +class      LIR_OpBranch;
   1.646 +class      LIR_OpConvert;
   1.647 +class      LIR_OpAllocObj;
   1.648 +class      LIR_OpRoundFP;
   1.649 +class    LIR_Op2;
   1.650 +class    LIR_OpDelay;
   1.651 +class    LIR_Op3;
   1.652 +class      LIR_OpAllocArray;
   1.653 +class    LIR_OpCall;
   1.654 +class      LIR_OpJavaCall;
   1.655 +class      LIR_OpRTCall;
   1.656 +class    LIR_OpArrayCopy;
   1.657 +class    LIR_OpLock;
   1.658 +class    LIR_OpTypeCheck;
   1.659 +class    LIR_OpCompareAndSwap;
   1.660 +class    LIR_OpProfileCall;
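// For example (editor's note, not in the original header): lir_move falls into
// the begin_op1..end_op1 range below and is built as a LIR_Op1 (one input
// operand plus a result), while lir_add falls into the op2 range and is built
// as a LIR_Op2 (two input operands plus a result).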
   1.661 +
   1.662 +
   1.663 +// LIR operation codes
   1.664 +enum LIR_Code {
   1.665 +    lir_none
   1.666 +  , begin_op0
   1.667 +      , lir_word_align
   1.668 +      , lir_label
   1.669 +      , lir_nop
   1.670 +      , lir_backwardbranch_target
   1.671 +      , lir_std_entry
   1.672 +      , lir_osr_entry
   1.673 +      , lir_build_frame
   1.674 +      , lir_fpop_raw
   1.675 +      , lir_24bit_FPU
   1.676 +      , lir_reset_FPU
   1.677 +      , lir_breakpoint
   1.678 +      , lir_rtcall
   1.679 +      , lir_membar
   1.680 +      , lir_membar_acquire
   1.681 +      , lir_membar_release
   1.682 +      , lir_get_thread
   1.683 +  , end_op0
   1.684 +  , begin_op1
   1.685 +      , lir_fxch
   1.686 +      , lir_fld
   1.687 +      , lir_ffree
   1.688 +      , lir_push
   1.689 +      , lir_pop
   1.690 +      , lir_null_check
   1.691 +      , lir_return
   1.692 +      , lir_leal
   1.693 +      , lir_neg
   1.694 +      , lir_branch
   1.695 +      , lir_cond_float_branch
   1.696 +      , lir_move
   1.697 +      , lir_prefetchr
   1.698 +      , lir_prefetchw
   1.699 +      , lir_convert
   1.700 +      , lir_alloc_object
   1.701 +      , lir_monaddr
   1.702 +      , lir_roundfp
   1.703 +      , lir_safepoint
   1.704 +  , end_op1
   1.705 +  , begin_op2
   1.706 +      , lir_cmp
   1.707 +      , lir_cmp_l2i
   1.708 +      , lir_ucmp_fd2i
   1.709 +      , lir_cmp_fd2i
   1.710 +      , lir_cmove
   1.711 +      , lir_add
   1.712 +      , lir_sub
   1.713 +      , lir_mul
   1.714 +      , lir_mul_strictfp
   1.715 +      , lir_div
   1.716 +      , lir_div_strictfp
   1.717 +      , lir_rem
   1.718 +      , lir_sqrt
   1.719 +      , lir_abs
   1.720 +      , lir_sin
   1.721 +      , lir_cos
   1.722 +      , lir_tan
   1.723 +      , lir_log
   1.724 +      , lir_log10
   1.725 +      , lir_logic_and
   1.726 +      , lir_logic_or
   1.727 +      , lir_logic_xor
   1.728 +      , lir_shl
   1.729 +      , lir_shr
   1.730 +      , lir_ushr
   1.731 +      , lir_alloc_array
   1.732 +      , lir_throw
   1.733 +      , lir_unwind
   1.734 +      , lir_compare_to
   1.735 +  , end_op2
   1.736 +  , begin_op3
   1.737 +      , lir_idiv
   1.738 +      , lir_irem
   1.739 +  , end_op3
   1.740 +  , begin_opJavaCall
   1.741 +      , lir_static_call
   1.742 +      , lir_optvirtual_call
   1.743 +      , lir_icvirtual_call
   1.744 +      , lir_virtual_call
   1.745 +  , end_opJavaCall
   1.746 +  , begin_opArrayCopy
   1.747 +      , lir_arraycopy
   1.748 +  , end_opArrayCopy
   1.749 +  , begin_opLock
   1.750 +    , lir_lock
   1.751 +    , lir_unlock
   1.752 +  , end_opLock
   1.753 +  , begin_delay_slot
   1.754 +    , lir_delay_slot
   1.755 +  , end_delay_slot
   1.756 +  , begin_opTypeCheck
   1.757 +    , lir_instanceof
   1.758 +    , lir_checkcast
   1.759 +    , lir_store_check
   1.760 +  , end_opTypeCheck
   1.761 +  , begin_opCompareAndSwap
   1.762 +    , lir_cas_long
   1.763 +    , lir_cas_obj
   1.764 +    , lir_cas_int
   1.765 +  , end_opCompareAndSwap
   1.766 +  , begin_opMDOProfile
   1.767 +    , lir_profile_call
   1.768 +  , end_opMDOProfile
   1.769 +};
   1.770 +
   1.771 +
   1.772 +enum LIR_Condition {
   1.773 +    lir_cond_equal
   1.774 +  , lir_cond_notEqual
   1.775 +  , lir_cond_less
   1.776 +  , lir_cond_lessEqual
   1.777 +  , lir_cond_greaterEqual
   1.778 +  , lir_cond_greater
   1.779 +  , lir_cond_belowEqual
   1.780 +  , lir_cond_aboveEqual
   1.781 +  , lir_cond_always
   1.782 +  , lir_cond_unknown = -1
   1.783 +};
   1.784 +
   1.785 +
   1.786 +enum LIR_PatchCode {
   1.787 +  lir_patch_none,
   1.788 +  lir_patch_low,
   1.789 +  lir_patch_high,
   1.790 +  lir_patch_normal
   1.791 +};
   1.792 +
   1.793 +
   1.794 +enum LIR_MoveKind {
   1.795 +  lir_move_normal,
   1.796 +  lir_move_volatile,
   1.797 +  lir_move_unaligned,
   1.798 +  lir_move_max_flag
   1.799 +};
   1.800 +
   1.801 +
   1.802 +// --------------------------------------------------
   1.803 +// LIR_Op
   1.804 +// --------------------------------------------------
   1.805 +class LIR_Op: public CompilationResourceObj {
   1.806 + friend class LIR_OpVisitState;
   1.807 +
   1.808 +#ifdef ASSERT
   1.809 + private:
   1.810 +  const char *  _file;
   1.811 +  int           _line;
   1.812 +#endif
   1.813 +
   1.814 + protected:
   1.815 +  LIR_Opr       _result;
   1.816 +  unsigned short _code;
   1.817 +  unsigned short _flags;
   1.818 +  CodeEmitInfo* _info;
   1.819 +  int           _id;     // value id for register allocation
   1.820 +  int           _fpu_pop_count;
   1.821 +  Instruction*  _source; // for debugging
   1.822 +
   1.823 +  static void print_condition(outputStream* out, LIR_Condition cond) PRODUCT_RETURN;
   1.824 +
   1.825 + protected:
   1.826 +  static bool is_in_range(LIR_Code test, LIR_Code start, LIR_Code end)  { return start < test && test < end; }
   1.827 +
   1.828 + public:
   1.829 +  LIR_Op()
   1.830 +    : _result(LIR_OprFact::illegalOpr)
   1.831 +    , _code(lir_none)
   1.832 +    , _flags(0)
   1.833 +    , _info(NULL)
   1.834 +#ifdef ASSERT
   1.835 +    , _file(NULL)
   1.836 +    , _line(0)
   1.837 +#endif
   1.838 +    , _fpu_pop_count(0)
   1.839 +    , _source(NULL)
   1.840 +    , _id(-1)                             {}
   1.841 +
   1.842 +  LIR_Op(LIR_Code code, LIR_Opr result, CodeEmitInfo* info)
   1.843 +    : _result(result)
   1.844 +    , _code(code)
   1.845 +    , _flags(0)
   1.846 +    , _info(info)
   1.847 +#ifdef ASSERT
   1.848 +    , _file(NULL)
   1.849 +    , _line(0)
   1.850 +#endif
   1.851 +    , _fpu_pop_count(0)
   1.852 +    , _source(NULL)
   1.853 +    , _id(-1)                             {}
   1.854 +
   1.855 +  CodeEmitInfo* info() const                  { return _info;   }
   1.856 +  LIR_Code code()      const                  { return (LIR_Code)_code;   }
   1.857 +  LIR_Opr result_opr() const                  { return _result; }
   1.858 +  void    set_result_opr(LIR_Opr opr)         { _result = opr;  }
   1.859 +
   1.860 +#ifdef ASSERT
   1.861 +  void set_file_and_line(const char * file, int line) {
   1.862 +    _file = file;
   1.863 +    _line = line;
   1.864 +  }
   1.865 +#endif
   1.866 +
   1.867 +  virtual const char * name() const PRODUCT_RETURN0;
   1.868 +
   1.869 +  int id()             const                  { return _id;     }
   1.870 +  void set_id(int id)                         { _id = id; }
   1.871 +
   1.872 +  // FPU stack simulation helpers -- only used on Intel
   1.873 +  void set_fpu_pop_count(int count)           { assert(count >= 0 && count <= 1, "currently only 0 and 1 are valid"); _fpu_pop_count = count; }
   1.874 +  int  fpu_pop_count() const                  { return _fpu_pop_count; }
   1.875 +  bool pop_fpu_stack()                        { return _fpu_pop_count > 0; }
   1.876 +
   1.877 +  Instruction* source() const                 { return _source; }
   1.878 +  void set_source(Instruction* ins)           { _source = ins; }
   1.879 +
   1.880 +  virtual void emit_code(LIR_Assembler* masm) = 0;
   1.881 +  virtual void print_instr(outputStream* out) const   = 0;
   1.882 +  virtual void print_on(outputStream* st) const PRODUCT_RETURN;
   1.883 +
   1.884 +  virtual LIR_OpCall* as_OpCall() { return NULL; }
   1.885 +  virtual LIR_OpJavaCall* as_OpJavaCall() { return NULL; }
   1.886 +  virtual LIR_OpLabel* as_OpLabel() { return NULL; }
   1.887 +  virtual LIR_OpDelay* as_OpDelay() { return NULL; }
   1.888 +  virtual LIR_OpLock* as_OpLock() { return NULL; }
   1.889 +  virtual LIR_OpAllocArray* as_OpAllocArray() { return NULL; }
   1.890 +  virtual LIR_OpAllocObj* as_OpAllocObj() { return NULL; }
   1.891 +  virtual LIR_OpRoundFP* as_OpRoundFP() { return NULL; }
   1.892 +  virtual LIR_OpBranch* as_OpBranch() { return NULL; }
   1.893 +  virtual LIR_OpRTCall* as_OpRTCall() { return NULL; }
   1.894 +  virtual LIR_OpConvert* as_OpConvert() { return NULL; }
   1.895 +  virtual LIR_Op0* as_Op0() { return NULL; }
   1.896 +  virtual LIR_Op1* as_Op1() { return NULL; }
   1.897 +  virtual LIR_Op2* as_Op2() { return NULL; }
   1.898 +  virtual LIR_Op3* as_Op3() { return NULL; }
   1.899 +  virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; }
   1.900 +  virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
   1.901 +  virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
   1.902 +  virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
   1.903 +
   1.904 +  virtual void verify() const {}
   1.905 +};
   1.906 +
   1.907 +// for calls
   1.908 +class LIR_OpCall: public LIR_Op {
   1.909 + friend class LIR_OpVisitState;
   1.910 +
   1.911 + protected:
   1.912 +  address      _addr;
   1.913 +  LIR_OprList* _arguments;
   1.914 + protected:
   1.915 +  LIR_OpCall(LIR_Code code, address addr, LIR_Opr result,
   1.916 +             LIR_OprList* arguments, CodeEmitInfo* info = NULL)
   1.917 +    : LIR_Op(code, result, info)
   1.918 +    , _arguments(arguments)
   1.919 +    , _addr(addr) {}
   1.920 +
   1.921 + public:
   1.922 +  address addr() const                           { return _addr; }
   1.923 +  const LIR_OprList* arguments() const           { return _arguments; }
   1.924 +  virtual LIR_OpCall* as_OpCall()                { return this; }
   1.925 +};
   1.926 +
   1.927 +
   1.928 +// --------------------------------------------------
   1.929 +// LIR_OpJavaCall
   1.930 +// --------------------------------------------------
   1.931 +class LIR_OpJavaCall: public LIR_OpCall {
   1.932 + friend class LIR_OpVisitState;
   1.933 +
   1.934 + private:
   1.935 +  ciMethod*       _method;
   1.936 +  LIR_Opr         _receiver;
   1.937 +
   1.938 + public:
   1.939 +  LIR_OpJavaCall(LIR_Code code, ciMethod* method,
   1.940 +                 LIR_Opr receiver, LIR_Opr result,
   1.941 +                 address addr, LIR_OprList* arguments,
   1.942 +                 CodeEmitInfo* info)
   1.943 +  : LIR_OpCall(code, addr, result, arguments, info)
   1.944 +  , _receiver(receiver)
   1.945 +  , _method(method)          { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
   1.946 +
   1.947 +  LIR_OpJavaCall(LIR_Code code, ciMethod* method,
   1.948 +                 LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
   1.949 +                 LIR_OprList* arguments, CodeEmitInfo* info)
   1.950 +  : LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
   1.951 +  , _receiver(receiver)
   1.952 +  , _method(method)          { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
   1.953 +
   1.954 +  LIR_Opr receiver() const                       { return _receiver; }
   1.955 +  ciMethod* method() const                       { return _method;   }
   1.956 +
   1.957 +  intptr_t vtable_offset() const {
   1.958 +    assert(_code == lir_virtual_call, "only have vtable for real vcall");
   1.959 +    return (intptr_t) addr();
   1.960 +  }
   1.961 +
   1.962 +  virtual void emit_code(LIR_Assembler* masm);
   1.963 +  virtual LIR_OpJavaCall* as_OpJavaCall() { return this; }
   1.964 +  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
   1.965 +};
   1.966 +
   1.967 +// --------------------------------------------------
   1.968 +// LIR_OpLabel
   1.969 +// --------------------------------------------------
   1.970 +// Location where a branch can continue
   1.971 +class LIR_OpLabel: public LIR_Op {
   1.972 + friend class LIR_OpVisitState;
   1.973 +
   1.974 + private:
   1.975 +  Label* _label;
   1.976 + public:
   1.977 +  LIR_OpLabel(Label* lbl)
   1.978 +   : LIR_Op(lir_label, LIR_OprFact::illegalOpr, NULL)
   1.979 +   , _label(lbl)                                 {}
   1.980 +  Label* label() const                           { return _label; }
   1.981 +
   1.982 +  virtual void emit_code(LIR_Assembler* masm);
   1.983 +  virtual LIR_OpLabel* as_OpLabel() { return this; }
   1.984 +  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
   1.985 +};
   1.986 +
   1.987 +// LIR_OpArrayCopy
   1.988 +class LIR_OpArrayCopy: public LIR_Op {
   1.989 + friend class LIR_OpVisitState;
   1.990 +
   1.991 + private:
   1.992 +  ArrayCopyStub*  _stub;
   1.993 +  LIR_Opr   _src;
   1.994 +  LIR_Opr   _src_pos;
   1.995 +  LIR_Opr   _dst;
   1.996 +  LIR_Opr   _dst_pos;
   1.997 +  LIR_Opr   _length;
   1.998 +  LIR_Opr   _tmp;
   1.999 +  ciArrayKlass* _expected_type;
  1.1000 +  int       _flags;
  1.1001 +
  1.1002 +public:
  1.1003 +  enum Flags {
  1.1004 +    src_null_check         = 1 << 0,
  1.1005 +    dst_null_check         = 1 << 1,
  1.1006 +    src_pos_positive_check = 1 << 2,
  1.1007 +    dst_pos_positive_check = 1 << 3,
  1.1008 +    length_positive_check  = 1 << 4,
  1.1009 +    src_range_check        = 1 << 5,
  1.1010 +    dst_range_check        = 1 << 6,
  1.1011 +    type_check             = 1 << 7,
  1.1012 +    all_flags              = (1 << 8) - 1
  1.1013 +  };
  1.1014 +
  1.1015 +  LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
  1.1016 +                  ciArrayKlass* expected_type, int flags, CodeEmitInfo* info);
  1.1017 +
  1.1018 +  LIR_Opr src() const                            { return _src; }
  1.1019 +  LIR_Opr src_pos() const                        { return _src_pos; }
  1.1020 +  LIR_Opr dst() const                            { return _dst; }
  1.1021 +  LIR_Opr dst_pos() const                        { return _dst_pos; }
  1.1022 +  LIR_Opr length() const                         { return _length; }
  1.1023 +  LIR_Opr tmp() const                            { return _tmp; }
  1.1024 +  int flags() const                              { return _flags; }
  1.1025 +  ciArrayKlass* expected_type() const            { return _expected_type; }
  1.1026 +  ArrayCopyStub* stub() const                    { return _stub; }
  1.1027 +
  1.1028 +  virtual void emit_code(LIR_Assembler* masm);
  1.1029 +  virtual LIR_OpArrayCopy* as_OpArrayCopy() { return this; }
  1.1030 +  void print_instr(outputStream* out) const PRODUCT_RETURN;
  1.1031 +};
  1.1032 +
  1.1033 +
  1.1034 +// --------------------------------------------------
  1.1035 +// LIR_Op0
  1.1036 +// --------------------------------------------------
  1.1037 +class LIR_Op0: public LIR_Op {
  1.1038 + friend class LIR_OpVisitState;
  1.1039 +
  1.1040 + public:
  1.1041 +  LIR_Op0(LIR_Code code)
  1.1042 +   : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
  1.1043 +  LIR_Op0(LIR_Code code, LIR_Opr result, CodeEmitInfo* info = NULL)
  1.1044 +   : LIR_Op(code, result, info)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
  1.1045 +
  1.1046 +  virtual void emit_code(LIR_Assembler* masm);
  1.1047 +  virtual LIR_Op0* as_Op0() { return this; }
  1.1048 +  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
  1.1049 +};
  1.1050 +
  1.1051 +
  1.1052 +// --------------------------------------------------
  1.1053 +// LIR_Op1
  1.1054 +// --------------------------------------------------
  1.1055 +
  1.1056 +class LIR_Op1: public LIR_Op {
  1.1057 + friend class LIR_OpVisitState;
  1.1058 +
  1.1059 + protected:
  1.1060 +  LIR_Opr         _opr;   // input operand
  1.1061 +  BasicType       _type;  // Operand types
   1.1062 +  LIR_PatchCode   _patch; // only required with patching (NEEDS_CLEANUP: do we want a special instruction for patching?)
  1.1063 +
  1.1064 +  static void print_patch_code(outputStream* out, LIR_PatchCode code);
  1.1065 +
  1.1066 +  void set_kind(LIR_MoveKind kind) {
  1.1067 +    assert(code() == lir_move, "must be");
  1.1068 +    _flags = kind;
  1.1069 +  }
  1.1070 +
  1.1071 + public:
  1.1072 +  LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result = LIR_OprFact::illegalOpr, BasicType type = T_ILLEGAL, LIR_PatchCode patch = lir_patch_none, CodeEmitInfo* info = NULL)
  1.1073 +    : LIR_Op(code, result, info)
  1.1074 +    , _opr(opr)
  1.1075 +    , _patch(patch)
  1.1076 +    , _type(type)                      { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
  1.1077 +
  1.1078 +  LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result, BasicType type, LIR_PatchCode patch, CodeEmitInfo* info, LIR_MoveKind kind)
  1.1079 +    : LIR_Op(code, result, info)
  1.1080 +    , _opr(opr)
  1.1081 +    , _patch(patch)
  1.1082 +    , _type(type)                      {
  1.1083 +    assert(code == lir_move, "must be");
  1.1084 +    set_kind(kind);
  1.1085 +  }
  1.1086 +
  1.1087 +  LIR_Op1(LIR_Code code, LIR_Opr opr, CodeEmitInfo* info)
  1.1088 +    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
  1.1089 +    , _opr(opr)
  1.1090 +    , _patch(lir_patch_none)
  1.1091 +    , _type(T_ILLEGAL)                 { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
  1.1092 +
  1.1093 +  LIR_Opr in_opr()           const               { return _opr;   }
  1.1094 +  LIR_PatchCode patch_code() const               { return _patch; }
  1.1095 +  BasicType type()           const               { return _type;  }
  1.1096 +
  1.1097 +  LIR_MoveKind move_kind() const {
  1.1098 +    assert(code() == lir_move, "must be");
  1.1099 +    return (LIR_MoveKind)_flags;
  1.1100 +  }
  1.1101 +
  1.1102 +  virtual void emit_code(LIR_Assembler* masm);
  1.1103 +  virtual LIR_Op1* as_Op1() { return this; }
  1.1104 +  virtual const char * name() const PRODUCT_RETURN0;
  1.1105 +
  1.1106 +  void set_in_opr(LIR_Opr opr) { _opr = opr; }
  1.1107 +
  1.1108 +  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
  1.1109 +  virtual void verify() const;
  1.1110 +};
  1.1111 +
  1.1112 +
  1.1113 +// for runtime calls
  1.1114 +class LIR_OpRTCall: public LIR_OpCall {
  1.1115 + friend class LIR_OpVisitState;
  1.1116 +
  1.1117 + private:
  1.1118 +  LIR_Opr _tmp;
  1.1119 + public:
  1.1120 +  LIR_OpRTCall(address addr, LIR_Opr tmp,
  1.1121 +               LIR_Opr result, LIR_OprList* arguments, CodeEmitInfo* info = NULL)
  1.1122 +    : LIR_OpCall(lir_rtcall, addr, result, arguments, info)
  1.1123 +    , _tmp(tmp) {}
  1.1124 +
  1.1125 +  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
  1.1126 +  virtual void emit_code(LIR_Assembler* masm);
  1.1127 +  virtual LIR_OpRTCall* as_OpRTCall() { return this; }
  1.1128 +
  1.1129 +  LIR_Opr tmp() const                            { return _tmp; }
  1.1130 +
  1.1131 +  virtual void verify() const;
  1.1132 +};
  1.1133 +
  1.1134 +
  1.1135 +class LIR_OpBranch: public LIR_Op {
  1.1136 + friend class LIR_OpVisitState;
  1.1137 +
  1.1138 + private:
  1.1139 +  LIR_Condition _cond;
  1.1140 +  BasicType     _type;
  1.1141 +  Label*        _label;
  1.1142 +  BlockBegin*   _block;  // if this is a branch to a block, this is the block
   1.1143 +  BlockBegin*   _ublock; // if this is a float-branch, this is the unordered block
  1.1144 +  CodeStub*     _stub;   // if this is a branch to a stub, this is the stub
  1.1145 +
  1.1146 + public:
  1.1147 +  LIR_OpBranch(LIR_Condition cond, Label* lbl)
  1.1148 +    : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
  1.1149 +    , _cond(cond)
  1.1150 +    , _label(lbl)
  1.1151 +    , _block(NULL)
  1.1152 +    , _ublock(NULL)
  1.1153 +    , _stub(NULL) { }
  1.1154 +
  1.1155 +  LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block);
  1.1156 +  LIR_OpBranch(LIR_Condition cond, BasicType type, CodeStub* stub);
  1.1157 +
  1.1158 +  // for unordered comparisons
  1.1159 +  LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* ublock);
  1.1160 +
  1.1161 +  LIR_Condition cond()        const              { return _cond;        }
  1.1162 +  BasicType     type()        const              { return _type;        }
  1.1163 +  Label*        label()       const              { return _label;       }
  1.1164 +  BlockBegin*   block()       const              { return _block;       }
  1.1165 +  BlockBegin*   ublock()      const              { return _ublock;      }
  1.1166 +  CodeStub*     stub()        const              { return _stub;       }
  1.1167 +
  1.1168 +  void          change_block(BlockBegin* b);
  1.1169 +  void          change_ublock(BlockBegin* b);
  1.1170 +  void          negate_cond();
  1.1171 +
  1.1172 +  virtual void emit_code(LIR_Assembler* masm);
  1.1173 +  virtual LIR_OpBranch* as_OpBranch() { return this; }
  1.1174 +  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
  1.1175 +};
  1.1176 +
  1.1177 +
  1.1178 +class ConversionStub;
  1.1179 +
  1.1180 +class LIR_OpConvert: public LIR_Op1 {
  1.1181 + friend class LIR_OpVisitState;
  1.1182 +
  1.1183 + private:
  1.1184 +   Bytecodes::Code _bytecode;
  1.1185 +   ConversionStub* _stub;
  1.1186 +
  1.1187 + public:
  1.1188 +   LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub)
  1.1189 +     : LIR_Op1(lir_convert, opr, result)
  1.1190 +     , _stub(stub)
  1.1191 +     , _bytecode(code)                           {}
  1.1192 +
  1.1193 +  Bytecodes::Code bytecode() const               { return _bytecode; }
  1.1194 +  ConversionStub* stub() const                   { return _stub; }
  1.1195 +
  1.1196 +  virtual void emit_code(LIR_Assembler* masm);
  1.1197 +  virtual LIR_OpConvert* as_OpConvert() { return this; }
  1.1198 +  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
  1.1199 +
  1.1200 +  static void print_bytecode(outputStream* out, Bytecodes::Code code) PRODUCT_RETURN;
  1.1201 +};
  1.1202 +
  1.1203 +
  1.1204 +// LIR_OpAllocObj
  1.1205 +class LIR_OpAllocObj : public LIR_Op1 {
  1.1206 + friend class LIR_OpVisitState;
  1.1207 +
  1.1208 + private:
  1.1209 +  LIR_Opr _tmp1;
  1.1210 +  LIR_Opr _tmp2;
  1.1211 +  LIR_Opr _tmp3;
  1.1212 +  LIR_Opr _tmp4;
  1.1213 +  int     _hdr_size;
  1.1214 +  int     _obj_size;
  1.1215 +  CodeStub* _stub;
  1.1216 +  bool    _init_check;
  1.1217 +
  1.1218 + public:
  1.1219 +  LIR_OpAllocObj(LIR_Opr klass, LIR_Opr result,
  1.1220 +                 LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4,
  1.1221 +                 int hdr_size, int obj_size, bool init_check, CodeStub* stub)
  1.1222 +    : LIR_Op1(lir_alloc_object, klass, result)
  1.1223 +    , _tmp1(t1)
  1.1224 +    , _tmp2(t2)
  1.1225 +    , _tmp3(t3)
  1.1226 +    , _tmp4(t4)
  1.1227 +    , _hdr_size(hdr_size)
  1.1228 +    , _obj_size(obj_size)
  1.1229 +    , _init_check(init_check)
  1.1230 +    , _stub(stub)                                { }
  1.1231 +
  1.1232 +  LIR_Opr klass()        const                   { return in_opr();     }
  1.1233 +  LIR_Opr obj()          const                   { return result_opr(); }
  1.1234 +  LIR_Opr tmp1()         const                   { return _tmp1;        }
  1.1235 +  LIR_Opr tmp2()         const                   { return _tmp2;        }
  1.1236 +  LIR_Opr tmp3()         const                   { return _tmp3;        }
  1.1237 +  LIR_Opr tmp4()         const                   { return _tmp4;        }
  1.1238 +  int     header_size()  const                   { return _hdr_size;    }
  1.1239 +  int     object_size()  const                   { return _obj_size;    }
  1.1240 +  bool    init_check()   const                   { return _init_check;  }
  1.1241 +  CodeStub* stub()       const                   { return _stub;        }
  1.1242 +
  1.1243 +  virtual void emit_code(LIR_Assembler* masm);
  1.1244 +  virtual LIR_OpAllocObj * as_OpAllocObj () { return this; }
  1.1245 +  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
  1.1246 +};
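// Sketch of assumed usage: fast-path object allocation is emitted through
// LIR_List::allocate_object() declared further below, where `slow_path` is a
// CodeStub entered when inline allocation fails or the klass still needs an
// initialization check:
//
//   lir->allocate_object(obj, tmp1, tmp2, tmp3, tmp4,
//                        header_size, object_size, klass, init_check, slow_path);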
  1.1247 +
  1.1248 +
  1.1249 +// LIR_OpRoundFP
  1.1250 +class LIR_OpRoundFP : public LIR_Op1 {
  1.1251 + friend class LIR_OpVisitState;
  1.1252 +
  1.1253 + private:
  1.1254 +  LIR_Opr _tmp;
  1.1255 +
  1.1256 + public:
  1.1257 +  LIR_OpRoundFP(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result)
  1.1258 +    : LIR_Op1(lir_roundfp, reg, result)
  1.1259 +    , _tmp(stack_loc_temp) {}
  1.1260 +
  1.1261 +  LIR_Opr tmp() const                            { return _tmp; }
  1.1262 +  virtual LIR_OpRoundFP* as_OpRoundFP()          { return this; }
  1.1263 +  void print_instr(outputStream* out) const PRODUCT_RETURN;
  1.1264 +};
  1.1265 +
  1.1266 +// LIR_OpTypeCheck
  1.1267 +class LIR_OpTypeCheck: public LIR_Op {
  1.1268 + friend class LIR_OpVisitState;
  1.1269 +
  1.1270 + private:
  1.1271 +  LIR_Opr       _object;
  1.1272 +  LIR_Opr       _array;
  1.1273 +  ciKlass*      _klass;
  1.1274 +  LIR_Opr       _tmp1;
  1.1275 +  LIR_Opr       _tmp2;
  1.1276 +  LIR_Opr       _tmp3;
  1.1277 +  bool          _fast_check;
  1.1278 +  CodeEmitInfo* _info_for_patch;
  1.1279 +  CodeEmitInfo* _info_for_exception;
  1.1280 +  CodeStub*     _stub;
  1.1281 +  // Helpers for Tier1UpdateMethodData
  1.1282 +  ciMethod*     _profiled_method;
  1.1283 +  int           _profiled_bci;
  1.1284 +
  1.1285 +public:
  1.1286 +  LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
  1.1287 +                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
  1.1288 +                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
  1.1289 +                  ciMethod* profiled_method, int profiled_bci);
  1.1290 +  LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
  1.1291 +                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception,
  1.1292 +                  ciMethod* profiled_method, int profiled_bci);
  1.1293 +
  1.1294 +  LIR_Opr object() const                         { return _object;         }
  1.1295 +  LIR_Opr array() const                          { assert(code() == lir_store_check, "not valid"); return _array;         }
  1.1296 +  LIR_Opr tmp1() const                           { return _tmp1;           }
  1.1297 +  LIR_Opr tmp2() const                           { return _tmp2;           }
  1.1298 +  LIR_Opr tmp3() const                           { return _tmp3;           }
  1.1299 +  ciKlass* klass() const                         { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _klass;          }
  1.1300 +  bool fast_check() const                        { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _fast_check;     }
  1.1301 +  CodeEmitInfo* info_for_patch() const           { return _info_for_patch;  }
  1.1302 +  CodeEmitInfo* info_for_exception() const       { return _info_for_exception; }
  1.1303 +  CodeStub* stub() const                         { return _stub;           }
  1.1304 +
  1.1305 +  // methodDataOop profiling
  1.1306 +  ciMethod* profiled_method()                    { return _profiled_method; }
  1.1307 +  int       profiled_bci()                       { return _profiled_bci; }
  1.1308 +
  1.1309 +  virtual void emit_code(LIR_Assembler* masm);
  1.1310 +  virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
  1.1311 +  void print_instr(outputStream* out) const PRODUCT_RETURN;
  1.1312 +};
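// Illustrative sketch: LIR_OpTypeCheck backs three LIR codes (lir_checkcast,
// lir_instanceof and lir_store_check), created via the LIR_List helpers declared
// further below, e.g. an instanceof without a fast subtype check or patching info:
//
//   lir->instanceof(result, obj, klass, tmp1, tmp2, tmp3,
//                   false /* fast_check */, NULL /* info_for_patch */);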
  1.1313 +
  1.1314 +// LIR_Op2
  1.1315 +class LIR_Op2: public LIR_Op {
  1.1316 + friend class LIR_OpVisitState;
  1.1317 +
  1.1318 +  int  _fpu_stack_size; // for sin/cos implementation on Intel
  1.1319 +
  1.1320 + protected:
  1.1321 +  LIR_Opr   _opr1;
  1.1322 +  LIR_Opr   _opr2;
  1.1323 +  BasicType _type;
  1.1324 +  LIR_Opr   _tmp;
  1.1325 +  LIR_Condition _condition;
  1.1326 +
  1.1327 +  void verify() const;
  1.1328 +
  1.1329 + public:
  1.1330 +  LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, CodeEmitInfo* info = NULL)
  1.1331 +    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
  1.1332 +    , _opr1(opr1)
  1.1333 +    , _opr2(opr2)
  1.1334 +    , _type(T_ILLEGAL)
  1.1335 +    , _condition(condition)
  1.1336 +    , _fpu_stack_size(0)
  1.1337 +    , _tmp(LIR_OprFact::illegalOpr) {
  1.1338 +    assert(code == lir_cmp, "code check");
  1.1339 +  }
  1.1340 +
  1.1341 +  LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result)
  1.1342 +    : LIR_Op(code, result, NULL)
  1.1343 +    , _opr1(opr1)
  1.1344 +    , _opr2(opr2)
  1.1345 +    , _type(T_ILLEGAL)
  1.1346 +    , _condition(condition)
  1.1347 +    , _fpu_stack_size(0)
  1.1348 +    , _tmp(LIR_OprFact::illegalOpr) {
  1.1349 +    assert(code == lir_cmove, "code check");
  1.1350 +  }
  1.1351 +
  1.1352 +  LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result = LIR_OprFact::illegalOpr,
  1.1353 +          CodeEmitInfo* info = NULL, BasicType type = T_ILLEGAL)
  1.1354 +    : LIR_Op(code, result, info)
  1.1355 +    , _opr1(opr1)
  1.1356 +    , _opr2(opr2)
  1.1357 +    , _type(type)
  1.1358 +    , _condition(lir_cond_unknown)
  1.1359 +    , _fpu_stack_size(0)
  1.1360 +    , _tmp(LIR_OprFact::illegalOpr) {
  1.1361 +    assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
  1.1362 +  }
  1.1363 +
  1.1364 +  LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp)
  1.1365 +    : LIR_Op(code, result, NULL)
  1.1366 +    , _opr1(opr1)
  1.1367 +    , _opr2(opr2)
  1.1368 +    , _type(T_ILLEGAL)
  1.1369 +    , _condition(lir_cond_unknown)
  1.1370 +    , _fpu_stack_size(0)
  1.1371 +    , _tmp(tmp) {
  1.1372 +    assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
  1.1373 +  }
  1.1374 +
  1.1375 +  LIR_Opr in_opr1() const                        { return _opr1; }
  1.1376 +  LIR_Opr in_opr2() const                        { return _opr2; }
  1.1377 +  BasicType type()  const                        { return _type; }
  1.1378 +  LIR_Opr tmp_opr() const                        { return _tmp; }
  1.1379 +  LIR_Condition condition() const  {
  1.1380 +    assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); return _condition;
  1.1381 +  }
  1.1382 +
  1.1383 +  void set_fpu_stack_size(int size)              { _fpu_stack_size = size; }
  1.1384 +  int  fpu_stack_size() const                    { return _fpu_stack_size; }
  1.1385 +
  1.1386 +  void set_in_opr1(LIR_Opr opr)                  { _opr1 = opr; }
  1.1387 +  void set_in_opr2(LIR_Opr opr)                  { _opr2 = opr; }
  1.1388 +
  1.1389 +  virtual void emit_code(LIR_Assembler* masm);
  1.1390 +  virtual LIR_Op2* as_Op2() { return this; }
  1.1391 +  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
  1.1392 +};
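// Illustrative sketch (assumed virtual-register operands a, b, dst): a typical
// lir_cmp/lir_cmove pair built with the LIR_List helpers cmp() and cmove(),
// selecting the larger of two values:
//
//   lir->cmp(lir_cond_greater, a, b);
//   lir->cmove(lir_cond_greater, a, b, dst);       // dst = (a > b) ? a : b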
  1.1393 +
  1.1394 +class LIR_OpAllocArray : public LIR_Op {
  1.1395 + friend class LIR_OpVisitState;
  1.1396 +
  1.1397 + private:
  1.1398 +  LIR_Opr   _klass;
  1.1399 +  LIR_Opr   _len;
  1.1400 +  LIR_Opr   _tmp1;
  1.1401 +  LIR_Opr   _tmp2;
  1.1402 +  LIR_Opr   _tmp3;
  1.1403 +  LIR_Opr   _tmp4;
  1.1404 +  BasicType _type;
  1.1405 +  CodeStub* _stub;
  1.1406 +
  1.1407 + public:
  1.1408 +  LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub)
  1.1409 +    : LIR_Op(lir_alloc_array, result, NULL)
  1.1410 +    , _klass(klass)
  1.1411 +    , _len(len)
  1.1412 +    , _tmp1(t1)
  1.1413 +    , _tmp2(t2)
  1.1414 +    , _tmp3(t3)
  1.1415 +    , _tmp4(t4)
  1.1416 +    , _type(type)
  1.1417 +    , _stub(stub) {}
  1.1418 +
  1.1419 +  LIR_Opr   klass()   const                      { return _klass;       }
  1.1420 +  LIR_Opr   len()     const                      { return _len;         }
  1.1421 +  LIR_Opr   obj()     const                      { return result_opr(); }
  1.1422 +  LIR_Opr   tmp1()    const                      { return _tmp1;        }
  1.1423 +  LIR_Opr   tmp2()    const                      { return _tmp2;        }
  1.1424 +  LIR_Opr   tmp3()    const                      { return _tmp3;        }
  1.1425 +  LIR_Opr   tmp4()    const                      { return _tmp4;        }
  1.1426 +  BasicType type()    const                      { return _type;        }
  1.1427 +  CodeStub* stub()    const                      { return _stub;        }
  1.1428 +
  1.1429 +  virtual void emit_code(LIR_Assembler* masm);
  1.1430 +  virtual LIR_OpAllocArray * as_OpAllocArray () { return this; }
  1.1431 +  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
  1.1432 +};
  1.1433 +
  1.1434 +
  1.1435 +class LIR_Op3: public LIR_Op {
  1.1436 + friend class LIR_OpVisitState;
  1.1437 +
  1.1438 + private:
  1.1439 +  LIR_Opr _opr1;
  1.1440 +  LIR_Opr _opr2;
  1.1441 +  LIR_Opr _opr3;
  1.1442 + public:
  1.1443 +  LIR_Op3(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr result, CodeEmitInfo* info = NULL)
  1.1444 +    : LIR_Op(code, result, info)
  1.1445 +    , _opr1(opr1)
  1.1446 +    , _opr2(opr2)
  1.1447 +    , _opr3(opr3)                                { assert(is_in_range(code, begin_op3, end_op3), "code check"); }
  1.1448 +  LIR_Opr in_opr1() const                        { return _opr1; }
  1.1449 +  LIR_Opr in_opr2() const                        { return _opr2; }
  1.1450 +  LIR_Opr in_opr3() const                        { return _opr3; }
  1.1451 +
  1.1452 +  virtual void emit_code(LIR_Assembler* masm);
  1.1453 +  virtual LIR_Op3* as_Op3() { return this; }
  1.1454 +  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
  1.1455 +};
  1.1456 +
  1.1457 +
  1.1458 +//--------------------------------
  1.1459 +class LabelObj: public CompilationResourceObj {
  1.1460 + private:
  1.1461 +  Label _label;
  1.1462 + public:
  1.1463 +  LabelObj()                                     {}
  1.1464 +  Label* label()                                 { return &_label; }
  1.1465 +};
  1.1466 +
  1.1467 +
  1.1468 +class LIR_OpLock: public LIR_Op {
  1.1469 + friend class LIR_OpVisitState;
  1.1470 +
  1.1471 + private:
  1.1472 +  LIR_Opr _hdr;
  1.1473 +  LIR_Opr _obj;
  1.1474 +  LIR_Opr _lock;
  1.1475 +  LIR_Opr _scratch;
  1.1476 +  CodeStub* _stub;
  1.1477 + public:
  1.1478 +  LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info)
  1.1479 +    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
  1.1480 +    , _hdr(hdr)
  1.1481 +    , _obj(obj)
  1.1482 +    , _lock(lock)
  1.1483 +    , _scratch(scratch)
  1.1484 +    , _stub(stub)                      {}
  1.1485 +
  1.1486 +  LIR_Opr hdr_opr() const                        { return _hdr; }
  1.1487 +  LIR_Opr obj_opr() const                        { return _obj; }
  1.1488 +  LIR_Opr lock_opr() const                       { return _lock; }
  1.1489 +  LIR_Opr scratch_opr() const                    { return _scratch; }
  1.1490 +  CodeStub* stub() const                         { return _stub; }
  1.1491 +
  1.1492 +  virtual void emit_code(LIR_Assembler* masm);
  1.1493 +  virtual LIR_OpLock* as_OpLock() { return this; }
  1.1494 +  void print_instr(outputStream* out) const PRODUCT_RETURN;
  1.1495 +};
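// Sketch of assumed usage: monitor enter/exit ops are created through the
// LIR_List helpers lock_object() and unlock_object() declared further below,
// with `slow_path` handling the contended case:
//
//   lir->lock_object(hdr, obj, lock, scratch, slow_path, info);
//   lir->unlock_object(hdr, obj, lock, slow_path);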
  1.1496 +
  1.1497 +
  1.1498 +class LIR_OpDelay: public LIR_Op {
  1.1499 + friend class LIR_OpVisitState;
  1.1500 +
  1.1501 + private:
  1.1502 +  LIR_Op* _op;
  1.1503 +
  1.1504 + public:
  1.1505 +  LIR_OpDelay(LIR_Op* op, CodeEmitInfo* info):
  1.1506 +    LIR_Op(lir_delay_slot, LIR_OprFact::illegalOpr, info),
  1.1507 +    _op(op) {
  1.1508 +    assert(op->code() == lir_nop || LIRFillDelaySlots, "should be filling with nops");
  1.1509 +  }
  1.1510 +  virtual void emit_code(LIR_Assembler* masm);
  1.1511 +  virtual LIR_OpDelay* as_OpDelay() { return this; }
  1.1512 +  void print_instr(outputStream* out) const PRODUCT_RETURN;
  1.1513 +  LIR_Op* delay_op() const { return _op; }
  1.1514 +  CodeEmitInfo* call_info() const { return info(); }
  1.1515 +};
  1.1516 +
  1.1517 +
  1.1518 +// LIR_OpCompareAndSwap
  1.1519 +class LIR_OpCompareAndSwap : public LIR_Op {
  1.1520 + friend class LIR_OpVisitState;
  1.1521 +
  1.1522 + private:
  1.1523 +  LIR_Opr _addr;
  1.1524 +  LIR_Opr _cmp_value;
  1.1525 +  LIR_Opr _new_value;
  1.1526 +  LIR_Opr _tmp1;
  1.1527 +  LIR_Opr _tmp2;
  1.1528 +
  1.1529 + public:
  1.1530 +  LIR_OpCompareAndSwap(LIR_Code code, LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2)
  1.1531 +    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  // no result, no info
  1.1532 +    , _addr(addr)
  1.1533 +    , _cmp_value(cmp_value)
  1.1534 +    , _new_value(new_value)
  1.1535 +    , _tmp1(t1)
  1.1536 +    , _tmp2(t2)                                  { }
  1.1537 +
  1.1538 +  LIR_Opr addr()        const                    { return _addr;  }
  1.1539 +  LIR_Opr cmp_value()   const                    { return _cmp_value; }
  1.1540 +  LIR_Opr new_value()   const                    { return _new_value; }
  1.1541 +  LIR_Opr tmp1()        const                    { return _tmp1;      }
  1.1542 +  LIR_Opr tmp2()        const                    { return _tmp2;      }
  1.1543 +
  1.1544 +  virtual void emit_code(LIR_Assembler* masm);
  1.1545 +  virtual LIR_OpCompareAndSwap * as_OpCompareAndSwap () { return this; }
  1.1546 +  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
  1.1547 +};
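// Sketch of assumed usage: atomic compare-and-swap ops are created through the
// LIR_List helpers cas_int(), cas_long() and cas_obj() declared further below;
// `addr` holds the location and t1/t2 are platform-dependent temporaries:
//
//   lir->cas_int(addr, cmp_value, new_value, t1, t2);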
  1.1548 +
  1.1549 +// LIR_OpProfileCall
  1.1550 +class LIR_OpProfileCall : public LIR_Op {
  1.1551 + friend class LIR_OpVisitState;
  1.1552 +
  1.1553 + private:
  1.1554 +  ciMethod* _profiled_method;
  1.1555 +  int _profiled_bci;
  1.1556 +  LIR_Opr _mdo;
  1.1557 +  LIR_Opr _recv;
  1.1558 +  LIR_Opr _tmp1;
  1.1559 +  ciKlass* _known_holder;
  1.1560 +
  1.1561 + public:
  1.1562 +  // Destroys recv
  1.1563 +  LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
  1.1564 +    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  // no result, no info
  1.1565 +    , _profiled_method(profiled_method)
  1.1566 +    , _profiled_bci(profiled_bci)
  1.1567 +    , _mdo(mdo)
  1.1568 +    , _recv(recv)
  1.1569 +    , _tmp1(t1)
  1.1570 +    , _known_holder(known_holder)                { }
  1.1571 +
  1.1572 +  ciMethod* profiled_method() const              { return _profiled_method;  }
  1.1573 +  int       profiled_bci()    const              { return _profiled_bci;     }
  1.1574 +  LIR_Opr   mdo()             const              { return _mdo;              }
  1.1575 +  LIR_Opr   recv()            const              { return _recv;             }
  1.1576 +  LIR_Opr   tmp1()            const              { return _tmp1;             }
  1.1577 +  ciKlass*  known_holder()    const              { return _known_holder;     }
  1.1578 +
  1.1579 +  virtual void emit_code(LIR_Assembler* masm);
  1.1580 +  virtual LIR_OpProfileCall* as_OpProfileCall() { return this; }
  1.1581 +  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
  1.1582 +};
  1.1583 +
  1.1584 +
  1.1585 +class LIR_InsertionBuffer;
  1.1586 +
  1.1587 +//--------------------------------LIR_List---------------------------------------------------
  1.1588 +// Maintains a list of LIR instructions (one instance of LIR_List per basic block)
  1.1589 +// The LIR instructions are appended through the factory methods of LIR_List itself.
  1.1590 +//
  1.1591 +// Notes:
  1.1592 +// - all offsets are (should be) in bytes
  1.1593 +// - local positions are specified with an offset, with offset 0 being local 0
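// Illustrative example (assumed operand setup): the LIR for "dst = a + b; return dst"
// in a basic block is built through the factory methods below, e.g.
//
//   block_lir->add(a, b, dst);
//   block_lir->return_op(dst);
//
// where `block_lir` is the LIR_List of the current block and `a`, `b`, `dst` are
// virtual-register operands.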
  1.1594 +
  1.1595 +class LIR_List: public CompilationResourceObj {
  1.1596 + private:
  1.1597 +  LIR_OpList  _operations;
  1.1598 +
  1.1599 +  Compilation*  _compilation;
  1.1600 +#ifndef PRODUCT
  1.1601 +  BlockBegin*   _block;
  1.1602 +#endif
  1.1603 +#ifdef ASSERT
  1.1604 +  const char *  _file;
  1.1605 +  int           _line;
  1.1606 +#endif
  1.1607 +
  1.1608 +  void append(LIR_Op* op) {
  1.1609 +    if (op->source() == NULL)
  1.1610 +      op->set_source(_compilation->current_instruction());
  1.1611 +#ifndef PRODUCT
  1.1612 +    if (PrintIRWithLIR) {
  1.1613 +      _compilation->maybe_print_current_instruction();
  1.1614 +      op->print(); tty->cr();
  1.1615 +    }
  1.1616 +#endif // PRODUCT
  1.1617 +
  1.1618 +    _operations.append(op);
  1.1619 +
  1.1620 +#ifdef ASSERT
  1.1621 +    op->verify();
  1.1622 +    op->set_file_and_line(_file, _line);
  1.1623 +    _file = NULL;
  1.1624 +    _line = 0;
  1.1625 +#endif
  1.1626 +  }
  1.1627 +
  1.1628 + public:
  1.1629 +  LIR_List(Compilation* compilation, BlockBegin* block = NULL);
  1.1630 +
  1.1631 +#ifdef ASSERT
  1.1632 +  void set_file_and_line(const char * file, int line);
  1.1633 +#endif
  1.1634 +
  1.1635 +  //---------- accessors ---------------
  1.1636 +  LIR_OpList* instructions_list()                { return &_operations; }
  1.1637 +  int         length() const                     { return _operations.length(); }
  1.1638 +  LIR_Op*     at(int i) const                    { return _operations.at(i); }
  1.1639 +
  1.1640 +  NOT_PRODUCT(BlockBegin* block() const          { return _block; });
  1.1641 +
  1.1642 +  // insert LIR_Ops in buffer to right places in LIR_List
  1.1643 +  void append(LIR_InsertionBuffer* buffer);
  1.1644 +
  1.1645 +  //---------- mutators ---------------
  1.1646 +  void insert_before(int i, LIR_List* op_list)   { _operations.insert_before(i, op_list->instructions_list()); }
  1.1647 +  void insert_before(int i, LIR_Op* op)          { _operations.insert_before(i, op); }
  1.1648 +
  1.1649 +  //---------- printing -------------
  1.1650 +  void print_instructions() PRODUCT_RETURN;
  1.1651 +
  1.1652 +
  1.1653 +  //---------- instructions -------------
  1.1654 +  void call_opt_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
  1.1655 +                        address dest, LIR_OprList* arguments,
  1.1656 +                        CodeEmitInfo* info) {
  1.1657 +    append(new LIR_OpJavaCall(lir_optvirtual_call, method, receiver, result, dest, arguments, info));
  1.1658 +  }
  1.1659 +  void call_static(ciMethod* method, LIR_Opr result,
  1.1660 +                   address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
  1.1661 +    append(new LIR_OpJavaCall(lir_static_call, method, LIR_OprFact::illegalOpr, result, dest, arguments, info));
  1.1662 +  }
  1.1663 +  void call_icvirtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
  1.1664 +                      address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
  1.1665 +    append(new LIR_OpJavaCall(lir_icvirtual_call, method, receiver, result, dest, arguments, info));
  1.1666 +  }
  1.1667 +  void call_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
  1.1668 +                    intptr_t vtable_offset, LIR_OprList* arguments, CodeEmitInfo* info) {
  1.1669 +    append(new LIR_OpJavaCall(lir_virtual_call, method, receiver, result, vtable_offset, arguments, info));
  1.1670 +  }
  1.1671 +
  1.1672 +  void get_thread(LIR_Opr result)                { append(new LIR_Op0(lir_get_thread, result)); }
  1.1673 +  void word_align()                              { append(new LIR_Op0(lir_word_align)); }
  1.1674 +  void membar()                                  { append(new LIR_Op0(lir_membar)); }
  1.1675 +  void membar_acquire()                          { append(new LIR_Op0(lir_membar_acquire)); }
  1.1676 +  void membar_release()                          { append(new LIR_Op0(lir_membar_release)); }
  1.1677 +
  1.1678 +  void nop()                                     { append(new LIR_Op0(lir_nop)); }
  1.1679 +  void build_frame()                             { append(new LIR_Op0(lir_build_frame)); }
  1.1680 +
  1.1681 +  void std_entry(LIR_Opr receiver)               { append(new LIR_Op0(lir_std_entry, receiver)); }
  1.1682 +  void osr_entry(LIR_Opr osrPointer)             { append(new LIR_Op0(lir_osr_entry, osrPointer)); }
  1.1683 +
  1.1684 +  void branch_destination(Label* lbl)            { append(new LIR_OpLabel(lbl)); }
  1.1685 +
  1.1686 +  void negate(LIR_Opr from, LIR_Opr to)          { append(new LIR_Op1(lir_neg, from, to)); }
  1.1687 +  void leal(LIR_Opr from, LIR_Opr result_reg)    { append(new LIR_Op1(lir_leal, from, result_reg)); }
  1.1688 +
  1.1689 +  // result is a stack location for old backend and vreg for UseLinearScan
  1.1690 +  // stack_loc_temp is an illegal register for old backend
  1.1691 +  void roundfp(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result) { append(new LIR_OpRoundFP(reg, stack_loc_temp, result)); }
  1.1692 +  void unaligned_move(LIR_Address* src, LIR_Opr dst) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
  1.1693 +  void unaligned_move(LIR_Opr src, LIR_Address* dst) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), src->type(), lir_patch_none, NULL, lir_move_unaligned)); }
  1.1694 +  void unaligned_move(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
  1.1695 +  void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
  1.1696 +  void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
  1.1697 +  void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }
  1.1698 +
  1.1699 +  void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }
  1.1700 +
  1.1701 +  void oop2reg  (jobject o, LIR_Opr reg)         { append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o),    reg));   }
  1.1702 +  void oop2reg_patch(jobject o, LIR_Opr reg, CodeEmitInfo* info);
  1.1703 +
  1.1704 +  void return_op(LIR_Opr result)                 { append(new LIR_Op1(lir_return, result)); }
  1.1705 +
  1.1706 +  void safepoint(LIR_Opr tmp, CodeEmitInfo* info)  { append(new LIR_Op1(lir_safepoint, tmp, info)); }
  1.1707 +
  1.1708 +  void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = NULL/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); }
  1.1709 +
  1.1710 +  void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and,  left, right, dst)); }
  1.1711 +  void logical_or  (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or,   left, right, dst)); }
  1.1712 +  void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor,  left, right, dst)); }
  1.1713 +
  1.1714 +  void null_check(LIR_Opr opr, CodeEmitInfo* info)         { append(new LIR_Op1(lir_null_check, opr, info)); }
  1.1715 +  void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); }
  1.1716 +  void unwind_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { append(new LIR_Op2(lir_unwind, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); }
  1.1717 +
  1.1718 +  void compare_to (LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
  1.1719 +    append(new LIR_Op2(lir_compare_to,  left, right, dst));
  1.1720 +  }
  1.1721 +
  1.1722 +  void push(LIR_Opr opr)                                   { append(new LIR_Op1(lir_push, opr)); }
  1.1723 +  void pop(LIR_Opr reg)                                    { append(new LIR_Op1(lir_pop,  reg)); }
  1.1724 +
  1.1725 +  void cmp(LIR_Condition condition, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL) {
  1.1726 +    append(new LIR_Op2(lir_cmp, condition, left, right, info));
  1.1727 +  }
  1.1728 +  void cmp(LIR_Condition condition, LIR_Opr left, int right, CodeEmitInfo* info = NULL) {
  1.1729 +    cmp(condition, left, LIR_OprFact::intConst(right), info);
  1.1730 +  }
  1.1731 +
  1.1732 +  void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
  1.1733 +  void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info);
  1.1734 +
  1.1735 +  void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst) {
  1.1736 +    append(new LIR_Op2(lir_cmove, condition, src1, src2, dst));
  1.1737 +  }
  1.1738 +
  1.1739 +  void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2);
  1.1740 +  void cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2);
  1.1741 +  void cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2);
  1.1742 +
  1.1743 +  void abs (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_abs , from, tmp, to)); }
  1.1744 +  void sqrt(LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_sqrt, from, tmp, to)); }
  1.1745 +  void log (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_log,  from, tmp, to)); }
  1.1746 +  void log10 (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)              { append(new LIR_Op2(lir_log10, from, tmp, to)); }
  1.1747 +  void sin (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_sin , from, tmp1, to, tmp2)); }
  1.1748 +  void cos (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_cos , from, tmp1, to, tmp2)); }
  1.1749 +  void tan (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_tan , from, tmp1, to, tmp2)); }
  1.1750 +
  1.1751 +  void add (LIR_Opr left, LIR_Opr right, LIR_Opr res)      { append(new LIR_Op2(lir_add, left, right, res)); }
  1.1752 +  void sub (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_sub, left, right, res, info)); }
  1.1753 +  void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res) { append(new LIR_Op2(lir_mul, left, right, res)); }
  1.1754 +  void mul_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_mul_strictfp, left, right, res, tmp)); }
  1.1755 +  void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_div, left, right, res, info)); }
  1.1756 +  void div_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_div_strictfp, left, right, res, tmp)); }
  1.1757 +  void rem (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_rem, left, right, res, info)); }
  1.1758 +
  1.1759 +  void volatile_load_mem_reg(LIR_Address* address, LIR_Opr dst, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
  1.1760 +  void volatile_load_unsafe_reg(LIR_Opr base, LIR_Opr offset, LIR_Opr dst, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
  1.1761 +
  1.1762 +  void load(LIR_Address* addr, LIR_Opr src, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
  1.1763 +
  1.1764 +  void prefetch(LIR_Address* addr, bool is_store);
  1.1765 +
  1.1766 +  void store_mem_int(jint v,    LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
  1.1767 +  void store_mem_oop(jobject o, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
  1.1768 +  void store(LIR_Opr src, LIR_Address* addr, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
  1.1769 +  void volatile_store_mem_reg(LIR_Opr src, LIR_Address* address, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
  1.1770 +  void volatile_store_unsafe_reg(LIR_Opr src, LIR_Opr base, LIR_Opr offset, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
  1.1771 +
  1.1772 +  void idiv(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
  1.1773 +  void idiv(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
  1.1774 +  void irem(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
  1.1775 +  void irem(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
  1.1776 +
  1.1777 +  void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub);
  1.1778 +  void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub);
  1.1779 +
  1.1780 +  // jump is an unconditional branch
  1.1781 +  void jump(BlockBegin* block) {
  1.1782 +    append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, block));
  1.1783 +  }
  1.1784 +  void jump(CodeStub* stub) {
  1.1785 +    append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, stub));
  1.1786 +  }
  1.1787 +  void branch(LIR_Condition cond, Label* lbl)        { append(new LIR_OpBranch(cond, lbl)); }
  1.1788 +  void branch(LIR_Condition cond, BasicType type, BlockBegin* block) {
  1.1789 +    assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
  1.1790 +    append(new LIR_OpBranch(cond, type, block));
  1.1791 +  }
  1.1792 +  void branch(LIR_Condition cond, BasicType type, CodeStub* stub)    {
  1.1793 +    assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
  1.1794 +    append(new LIR_OpBranch(cond, type, stub));
  1.1795 +  }
  1.1796 +  void branch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* unordered) {
  1.1797 +    assert(type == T_FLOAT || type == T_DOUBLE, "fp comparisons only");
  1.1798 +    append(new LIR_OpBranch(cond, type, block, unordered));
  1.1799 +  }
  1.1800 +
  1.1801 +  void shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
  1.1802 +  void shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
  1.1803 +  void unsigned_shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
  1.1804 +
  1.1805 +  void shift_left(LIR_Opr value, int count, LIR_Opr dst)       { shift_left(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
  1.1806 +  void shift_right(LIR_Opr value, int count, LIR_Opr dst)      { shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
  1.1807 +  void unsigned_shift_right(LIR_Opr value, int count, LIR_Opr dst) { unsigned_shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
  1.1808 +
  1.1809 +  void lcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst)        { append(new LIR_Op2(lir_cmp_l2i,  left, right, dst)); }
  1.1810 +  void fcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst, bool is_unordered_less);
  1.1811 +
  1.1812 +  void call_runtime_leaf(address routine, LIR_Opr tmp, LIR_Opr result, LIR_OprList* arguments) {
  1.1813 +    append(new LIR_OpRTCall(routine, tmp, result, arguments));
  1.1814 +  }
  1.1815 +
  1.1816 +  void call_runtime(address routine, LIR_Opr tmp, LIR_Opr result,
  1.1817 +                    LIR_OprList* arguments, CodeEmitInfo* info) {
  1.1818 +    append(new LIR_OpRTCall(routine, tmp, result, arguments, info));
  1.1819 +  }
  1.1820 +
  1.1821 +  void load_stack_address_monitor(int monitor_ix, LIR_Opr dst)  { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); }
  1.1822 +  void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, CodeStub* stub);
  1.1823 +  void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info);
  1.1824 +
  1.1825 +  void set_24bit_fpu()                                               { append(new LIR_Op0(lir_24bit_FPU )); }
  1.1826 +  void restore_fpu()                                                 { append(new LIR_Op0(lir_reset_FPU )); }
  1.1827 +  void breakpoint()                                                  { append(new LIR_Op0(lir_breakpoint)); }
  1.1828 +
  1.1829 +  void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); }
  1.1830 +
  1.1831 +  void fpop_raw()                                { append(new LIR_Op0(lir_fpop_raw)); }
  1.1832 +
  1.1833 +  void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
  1.1834 +                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
  1.1835 +                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
  1.1836 +                  ciMethod* profiled_method, int profiled_bci);
  1.1837 +  void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch);
  1.1838 +  void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
  1.1839 +
  1.1840 +  // methodDataOop profiling
  1.1841 +  void profile_call(ciMethod* method, int bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) { append(new LIR_OpProfileCall(lir_profile_call, method, bci, mdo, recv, t1, cha_klass)); }
  1.1842 +};
  1.1843 +
  1.1844 +void print_LIR(BlockList* blocks);
  1.1845 +
  1.1846 +class LIR_InsertionBuffer : public CompilationResourceObj {
  1.1847 + private:
  1.1848 +  LIR_List*   _lir;   // the lir list where ops of this buffer should be inserted later (NULL when uninitialized)
  1.1849 +
  1.1850 +  // list of insertion points. index and count are stored alternately:
  1.1851 +  // _index_and_count[i * 2]:     the index into lir list where "count" ops should be inserted
  1.1852 +  // _index_and_count[i * 2 + 1]: the number of ops to be inserted at index
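  // Worked example (illustrative): after append(3, op_a), append(7, op_b) and
  // append(7, op_c) the buffer holds
  //   _index_and_count: [3, 1, 7, 2]   (1 op at index 3, 2 ops at index 7)
  //   _ops:             [op_a, op_b, op_c]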
  1.1853 +  intStack    _index_and_count;
  1.1854 +
  1.1855 +  // the LIR_Ops to be inserted
  1.1856 +  LIR_OpList  _ops;
  1.1857 +
  1.1858 +  void append_new(int index, int count)  { _index_and_count.append(index); _index_and_count.append(count); }
  1.1859 +  void set_index_at(int i, int value)    { _index_and_count.at_put((i << 1),     value); }
  1.1860 +  void set_count_at(int i, int value)    { _index_and_count.at_put((i << 1) + 1, value); }
  1.1861 +
  1.1862 +#ifdef ASSERT
  1.1863 +  void verify();
  1.1864 +#endif
  1.1865 + public:
  1.1866 +  LIR_InsertionBuffer() : _lir(NULL), _index_and_count(8), _ops(8) { }
  1.1867 +
  1.1868 +  // must be called before using the insertion buffer
  1.1869 +  void init(LIR_List* lir)  { assert(!initialized(), "already initialized"); _lir = lir; _index_and_count.clear(); _ops.clear(); }
  1.1870 +  bool initialized() const  { return _lir != NULL; }
  1.1871 +  // called automatically when the buffer is appended to the LIR_List
  1.1872 +  void finish()             { _lir = NULL; }
  1.1873 +
  1.1874 +  // accessors
  1.1875 +  LIR_List*  lir_list() const             { return _lir; }
  1.1876 +  int number_of_insertion_points() const  { return _index_and_count.length() >> 1; }
  1.1877 +  int index_at(int i) const               { return _index_and_count.at((i << 1));     }
  1.1878 +  int count_at(int i) const               { return _index_and_count.at((i << 1) + 1); }
  1.1879 +
  1.1880 +  int number_of_ops() const               { return _ops.length(); }
  1.1881 +  LIR_Op* op_at(int i) const              { return _ops.at(i); }
  1.1882 +
  1.1883 +  // append an instruction to the buffer
  1.1884 +  void append(int index, LIR_Op* op);
  1.1885 +
  1.1886 +  // convenience instruction factory
  1.1887 +  void move(int index, LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(index, new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
  1.1888 +};
  1.1889 +
  1.1890 +
  1.1891 +//
  1.1892 +// LIR_OpVisitState is used for manipulating LIR_Ops in an abstract way.
  1.1893 +// Passing a LIR_Op to the visit function of a LIR_OpVisitState causes
  1.1894 +// information about the input, output and temporary operands used by the
  1.1895 +// op to be recorded.  It also records whether the op has call semantics
  1.1896 +// and collects all the CodeEmitInfos used by the op.
  1.1897 +//
  1.1898 +
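// Typical use (sketch, assuming `op` is any LIR_Op*): collect and inspect the
// input operands of an instruction, e.g. from a register allocation pass:
//
//   LIR_OpVisitState state;
//   state.visit(op);
//   for (int i = 0; i < state.opr_count(LIR_OpVisitState::inputMode); i++) {
//     LIR_Opr opr = state.opr_at(LIR_OpVisitState::inputMode, i);
//     // ... examine opr, or replace it via set_opr_at() ...
//   }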
  1.1899 +
  1.1900 +class LIR_OpVisitState: public StackObj {
  1.1901 + public:
  1.1902 +  typedef enum { inputMode, firstMode = inputMode, tempMode, outputMode, numModes, invalidMode = -1 } OprMode;
  1.1903 +
  1.1904 +  enum {
  1.1905 +    maxNumberOfOperands = 14,
  1.1906 +    maxNumberOfInfos = 4
  1.1907 +  };
  1.1908 +
  1.1909 + private:
  1.1910 +  LIR_Op*          _op;
  1.1911 +
  1.1912 +  // optimization: the operands and infos are not stored in a variable-length
  1.1913 +  //               list, but in a fixed-size array to avoid the overhead of size checks and resizing
  1.1914 +  int              _oprs_len[numModes];
  1.1915 +  LIR_Opr*         _oprs_new[numModes][maxNumberOfOperands];
  1.1916 +  int              _info_len;
  1.1917 +  CodeEmitInfo*    _info_new[maxNumberOfInfos];
  1.1918 +
  1.1919 +  bool             _has_call;
  1.1920 +  bool             _has_slow_case;
  1.1921 +
  1.1922 +
  1.1923 +  // only include register operands
  1.1924 +  // addresses are decomposed to the base and index registers
  1.1925 +  // constants and stack operands are ignored
  1.1926 +  void append(LIR_Opr& opr, OprMode mode) {
  1.1927 +    assert(opr->is_valid(), "should not call this otherwise");
  1.1928 +    assert(mode >= 0 && mode < numModes, "bad mode");
  1.1929 +
  1.1930 +    if (opr->is_register()) {
  1.1931 +      assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
  1.1932 +      _oprs_new[mode][_oprs_len[mode]++] = &opr;
  1.1933 +
  1.1934 +    } else if (opr->is_pointer()) {
  1.1935 +      LIR_Address* address = opr->as_address_ptr();
  1.1936 +      if (address != NULL) {
  1.1937 +        // special handling for addresses: add base and index register of the address
  1.1938 +        // both are always input operands!
  1.1939 +        if (address->_base->is_valid()) {
  1.1940 +          assert(address->_base->is_register(), "must be");
  1.1941 +          assert(_oprs_len[inputMode] < maxNumberOfOperands, "array overflow");
  1.1942 +          _oprs_new[inputMode][_oprs_len[inputMode]++] = &address->_base;
  1.1943 +        }
  1.1944 +        if (address->_index->is_valid()) {
  1.1945 +          assert(address->_index->is_register(), "must be");
  1.1946 +          assert(_oprs_len[inputMode] < maxNumberOfOperands, "array overflow");
  1.1947 +          _oprs_new[inputMode][_oprs_len[inputMode]++] = &address->_index;
  1.1948 +        }
  1.1949 +
  1.1950 +      } else {
  1.1951 +        assert(opr->is_constant(), "constant operands are not processed");
  1.1952 +      }
  1.1953 +    } else {
  1.1954 +      assert(opr->is_stack(), "stack operands are not processed");
  1.1955 +    }
  1.1956 +  }
  1.1957 +
  1.1958 +  void append(CodeEmitInfo* info) {
  1.1959 +    assert(info != NULL, "should not call this otherwise");
  1.1960 +    assert(_info_len < maxNumberOfInfos, "array overflow");
  1.1961 +    _info_new[_info_len++] = info;
  1.1962 +  }
  1.1963 +
  1.1964 + public:
  1.1965 +  LIR_OpVisitState()         { reset(); }
  1.1966 +
  1.1967 +  LIR_Op* op() const         { return _op; }
  1.1968 +  void set_op(LIR_Op* op)    { reset(); _op = op; }
  1.1969 +
  1.1970 +  bool has_call() const      { return _has_call; }
  1.1971 +  bool has_slow_case() const { return _has_slow_case; }
  1.1972 +
  1.1973 +  void reset() {
  1.1974 +    _op = NULL;
  1.1975 +    _has_call = false;
  1.1976 +    _has_slow_case = false;
  1.1977 +
  1.1978 +    _oprs_len[inputMode] = 0;
  1.1979 +    _oprs_len[tempMode] = 0;
  1.1980 +    _oprs_len[outputMode] = 0;
  1.1981 +    _info_len = 0;
  1.1982 +  }
  1.1983 +
  1.1984 +
  1.1985 +  int opr_count(OprMode mode) const {
  1.1986 +    assert(mode >= 0 && mode < numModes, "bad mode");
  1.1987 +    return _oprs_len[mode];
  1.1988 +  }
  1.1989 +
  1.1990 +  LIR_Opr opr_at(OprMode mode, int index) const {
  1.1991 +    assert(mode >= 0 && mode < numModes, "bad mode");
  1.1992 +    assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
  1.1993 +    return *_oprs_new[mode][index];
  1.1994 +  }
  1.1995 +
  1.1996 +  void set_opr_at(OprMode mode, int index, LIR_Opr opr) const {
  1.1997 +    assert(mode >= 0 && mode < numModes, "bad mode");
  1.1998 +    assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
  1.1999 +    *_oprs_new[mode][index] = opr;
  1.2000 +  }
  1.2001 +
  1.2002 +  int info_count() const {
  1.2003 +    return _info_len;
  1.2004 +  }
  1.2005 +
  1.2006 +  CodeEmitInfo* info_at(int index) const {
  1.2007 +    assert(index < _info_len, "index out of bounds");
  1.2008 +    return _info_new[index];
  1.2009 +  }
  1.2010 +
  1.2011 +  XHandlers* all_xhandler();
  1.2012 +
  1.2013 +  // collects all register operands of the instruction
  1.2014 +  void visit(LIR_Op* op);
  1.2015 +
  1.2016 +#ifdef ASSERT
  1.2017 +  // check that an operation has no operands
  1.2018 +  bool no_operands(LIR_Op* op);
  1.2019 +#endif
  1.2020 +
  1.2021 +  // LIR_Op visitor functions use these to fill in the state
  1.2022 +  void do_input(LIR_Opr& opr)             { append(opr, LIR_OpVisitState::inputMode); }
  1.2023 +  void do_output(LIR_Opr& opr)            { append(opr, LIR_OpVisitState::outputMode); }
  1.2024 +  void do_temp(LIR_Opr& opr)              { append(opr, LIR_OpVisitState::tempMode); }
  1.2025 +  void do_info(CodeEmitInfo* info)        { append(info); }
  1.2026 +
  1.2027 +  void do_stub(CodeStub* stub);
  1.2028 +  void do_call()                          { _has_call = true; }
  1.2029 +  void do_slow_case()                     { _has_slow_case = true; }
  1.2030 +  void do_slow_case(CodeEmitInfo* info) {
  1.2031 +    _has_slow_case = true;
  1.2032 +    append(info);
  1.2033 +  }
  1.2034 +};
  1.2035 +
  1.2036 +
  1.2037 +inline LIR_Opr LIR_OprDesc::illegalOpr()   { return LIR_OprFact::illegalOpr; }
