1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/src/share/vm/c1/c1_LIR.hpp Wed Apr 27 01:25:04 2016 +0800 1.3 @@ -0,0 +1,2508 @@ 1.4 +/* 1.5 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. 1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 1.7 + * 1.8 + * This code is free software; you can redistribute it and/or modify it 1.9 + * under the terms of the GNU General Public License version 2 only, as 1.10 + * published by the Free Software Foundation. 1.11 + * 1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 1.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 1.15 + * version 2 for more details (a copy is included in the LICENSE file that 1.16 + * accompanied this code). 1.17 + * 1.18 + * You should have received a copy of the GNU General Public License version 1.19 + * 2 along with this work; if not, write to the Free Software Foundation, 1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 1.21 + * 1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 1.23 + * or visit www.oracle.com if you need additional information or have any 1.24 + * questions. 
1.25 + * 1.26 + */ 1.27 + 1.28 +#ifndef SHARE_VM_C1_C1_LIR_HPP 1.29 +#define SHARE_VM_C1_C1_LIR_HPP 1.30 + 1.31 +#include "c1/c1_ValueType.hpp" 1.32 +#include "oops/method.hpp" 1.33 + 1.34 +class BlockBegin; 1.35 +class BlockList; 1.36 +class LIR_Assembler; 1.37 +class CodeEmitInfo; 1.38 +class CodeStub; 1.39 +class CodeStubList; 1.40 +class ArrayCopyStub; 1.41 +class LIR_Op; 1.42 +class ciType; 1.43 +class ValueType; 1.44 +class LIR_OpVisitState; 1.45 +class FpuStackSim; 1.46 + 1.47 +//--------------------------------------------------------------------- 1.48 +// LIR Operands 1.49 +// LIR_OprDesc 1.50 +// LIR_OprPtr 1.51 +// LIR_Const 1.52 +// LIR_Address 1.53 +//--------------------------------------------------------------------- 1.54 +class LIR_OprDesc; 1.55 +class LIR_OprPtr; 1.56 +class LIR_Const; 1.57 +class LIR_Address; 1.58 +class LIR_OprVisitor; 1.59 + 1.60 + 1.61 +typedef LIR_OprDesc* LIR_Opr; 1.62 +typedef int RegNr; 1.63 + 1.64 +define_array(LIR_OprArray, LIR_Opr) 1.65 +define_stack(LIR_OprList, LIR_OprArray) 1.66 + 1.67 +define_array(LIR_OprRefArray, LIR_Opr*) 1.68 +define_stack(LIR_OprRefList, LIR_OprRefArray) 1.69 + 1.70 +define_array(CodeEmitInfoArray, CodeEmitInfo*) 1.71 +define_stack(CodeEmitInfoList, CodeEmitInfoArray) 1.72 + 1.73 +define_array(LIR_OpArray, LIR_Op*) 1.74 +define_stack(LIR_OpList, LIR_OpArray) 1.75 + 1.76 +// define LIR_OprPtr early so LIR_OprDesc can refer to it 1.77 +class LIR_OprPtr: public CompilationResourceObj { 1.78 + public: 1.79 + bool is_oop_pointer() const { return (type() == T_OBJECT); } 1.80 + bool is_float_kind() const { BasicType t = type(); return (t == T_FLOAT) || (t == T_DOUBLE); } 1.81 + 1.82 + virtual LIR_Const* as_constant() { return NULL; } 1.83 + virtual LIR_Address* as_address() { return NULL; } 1.84 + virtual BasicType type() const = 0; 1.85 + virtual void print_value_on(outputStream* out) const = 0; 1.86 +}; 1.87 + 1.88 + 1.89 + 1.90 +// LIR constants 1.91 +class LIR_Const: public LIR_OprPtr { 1.92 + 
private: 1.93 + JavaValue _value; 1.94 + 1.95 + void type_check(BasicType t) const { assert(type() == t, "type check"); } 1.96 + void type_check(BasicType t1, BasicType t2) const { assert(type() == t1 || type() == t2, "type check"); } 1.97 + void type_check(BasicType t1, BasicType t2, BasicType t3) const { assert(type() == t1 || type() == t2 || type() == t3, "type check"); } 1.98 + 1.99 + public: 1.100 + LIR_Const(jint i, bool is_address=false) { _value.set_type(is_address?T_ADDRESS:T_INT); _value.set_jint(i); } 1.101 + LIR_Const(jlong l) { _value.set_type(T_LONG); _value.set_jlong(l); } 1.102 + LIR_Const(jfloat f) { _value.set_type(T_FLOAT); _value.set_jfloat(f); } 1.103 + LIR_Const(jdouble d) { _value.set_type(T_DOUBLE); _value.set_jdouble(d); } 1.104 + LIR_Const(jobject o) { _value.set_type(T_OBJECT); _value.set_jobject(o); } 1.105 + LIR_Const(void* p) { 1.106 +#ifdef _LP64 1.107 + assert(sizeof(jlong) >= sizeof(p), "too small");; 1.108 + _value.set_type(T_LONG); _value.set_jlong((jlong)p); 1.109 +#else 1.110 + assert(sizeof(jint) >= sizeof(p), "too small");; 1.111 + _value.set_type(T_INT); _value.set_jint((jint)p); 1.112 +#endif 1.113 + } 1.114 + LIR_Const(Metadata* m) { 1.115 + _value.set_type(T_METADATA); 1.116 +#ifdef _LP64 1.117 + _value.set_jlong((jlong)m); 1.118 +#else 1.119 + _value.set_jint((jint)m); 1.120 +#endif // _LP64 1.121 + } 1.122 + 1.123 + virtual BasicType type() const { return _value.get_type(); } 1.124 + virtual LIR_Const* as_constant() { return this; } 1.125 + 1.126 + jint as_jint() const { type_check(T_INT, T_ADDRESS); return _value.get_jint(); } 1.127 + jlong as_jlong() const { type_check(T_LONG ); return _value.get_jlong(); } 1.128 + jfloat as_jfloat() const { type_check(T_FLOAT ); return _value.get_jfloat(); } 1.129 + jdouble as_jdouble() const { type_check(T_DOUBLE); return _value.get_jdouble(); } 1.130 + jobject as_jobject() const { type_check(T_OBJECT); return _value.get_jobject(); } 1.131 + jint as_jint_lo() const { 
type_check(T_LONG ); return low(_value.get_jlong()); } 1.132 + jint as_jint_hi() const { type_check(T_LONG ); return high(_value.get_jlong()); } 1.133 + 1.134 +#ifdef _LP64 1.135 + address as_pointer() const { type_check(T_LONG ); return (address)_value.get_jlong(); } 1.136 + Metadata* as_metadata() const { type_check(T_METADATA); return (Metadata*)_value.get_jlong(); } 1.137 +#else 1.138 + address as_pointer() const { type_check(T_INT ); return (address)_value.get_jint(); } 1.139 + Metadata* as_metadata() const { type_check(T_METADATA); return (Metadata*)_value.get_jint(); } 1.140 +#endif 1.141 + 1.142 + 1.143 + jint as_jint_bits() const { type_check(T_FLOAT, T_INT, T_ADDRESS); return _value.get_jint(); } 1.144 + jint as_jint_lo_bits() const { 1.145 + if (type() == T_DOUBLE) { 1.146 + return low(jlong_cast(_value.get_jdouble())); 1.147 + } else { 1.148 + return as_jint_lo(); 1.149 + } 1.150 + } 1.151 + jint as_jint_hi_bits() const { 1.152 + if (type() == T_DOUBLE) { 1.153 + return high(jlong_cast(_value.get_jdouble())); 1.154 + } else { 1.155 + return as_jint_hi(); 1.156 + } 1.157 + } 1.158 + jlong as_jlong_bits() const { 1.159 + if (type() == T_DOUBLE) { 1.160 + return jlong_cast(_value.get_jdouble()); 1.161 + } else { 1.162 + return as_jlong(); 1.163 + } 1.164 + } 1.165 + 1.166 + virtual void print_value_on(outputStream* out) const PRODUCT_RETURN; 1.167 + 1.168 + 1.169 + bool is_zero_float() { 1.170 + jfloat f = as_jfloat(); 1.171 + jfloat ok = 0.0f; 1.172 + return jint_cast(f) == jint_cast(ok); 1.173 + } 1.174 + 1.175 + bool is_one_float() { 1.176 + jfloat f = as_jfloat(); 1.177 + return !g_isnan(f) && g_isfinite(f) && f == 1.0; 1.178 + } 1.179 + 1.180 + bool is_zero_double() { 1.181 + jdouble d = as_jdouble(); 1.182 + jdouble ok = 0.0; 1.183 + return jlong_cast(d) == jlong_cast(ok); 1.184 + } 1.185 + 1.186 + bool is_one_double() { 1.187 + jdouble d = as_jdouble(); 1.188 + return !g_isnan(d) && g_isfinite(d) && d == 1.0; 1.189 + } 1.190 +}; 1.191 + 1.192 + 
1.193 +//---------------------LIR Operand descriptor------------------------------------ 1.194 +// 1.195 +// The class LIR_OprDesc represents a LIR instruction operand; 1.196 +// it can be a register (ALU/FPU), stack location or a constant; 1.197 +// Constants and addresses are represented as resource area allocated 1.198 +// structures (see above). 1.199 +// Registers and stack locations are inlined into the this pointer 1.200 +// (see value function). 1.201 + 1.202 +class LIR_OprDesc: public CompilationResourceObj { 1.203 + public: 1.204 + // value structure: 1.205 + // data opr-type opr-kind 1.206 + // +--------------+-------+-------+ 1.207 + // [max...........|7 6 5 4|3 2 1 0] 1.208 + // ^ 1.209 + // is_pointer bit 1.210 + // 1.211 + // lowest bit cleared, means it is a structure pointer 1.212 + // we need 4 bits to represent types 1.213 + 1.214 + private: 1.215 + friend class LIR_OprFact; 1.216 + 1.217 + // Conversion 1.218 + intptr_t value() const { return (intptr_t) this; } 1.219 + 1.220 + bool check_value_mask(intptr_t mask, intptr_t masked_value) const { 1.221 + return (value() & mask) == masked_value; 1.222 + } 1.223 + 1.224 + enum OprKind { 1.225 + pointer_value = 0 1.226 + , stack_value = 1 1.227 + , cpu_register = 3 1.228 + , fpu_register = 5 1.229 + , illegal_value = 7 1.230 + }; 1.231 + 1.232 + enum OprBits { 1.233 + pointer_bits = 1 1.234 + , kind_bits = 3 1.235 + , type_bits = 4 1.236 + , size_bits = 2 1.237 + , destroys_bits = 1 1.238 + , virtual_bits = 1 1.239 + , is_xmm_bits = 1 1.240 + , last_use_bits = 1 1.241 + , is_fpu_stack_offset_bits = 1 // used in assertion checking on x86 for FPU stack slot allocation 1.242 + , non_data_bits = kind_bits + type_bits + size_bits + destroys_bits + last_use_bits + 1.243 + is_fpu_stack_offset_bits + virtual_bits + is_xmm_bits 1.244 + , data_bits = BitsPerInt - non_data_bits 1.245 + , reg_bits = data_bits / 2 // for two registers in one value encoding 1.246 + }; 1.247 + 1.248 + enum OprShift { 1.249 + 
kind_shift = 0 1.250 + , type_shift = kind_shift + kind_bits 1.251 + , size_shift = type_shift + type_bits 1.252 + , destroys_shift = size_shift + size_bits 1.253 + , last_use_shift = destroys_shift + destroys_bits 1.254 + , is_fpu_stack_offset_shift = last_use_shift + last_use_bits 1.255 + , virtual_shift = is_fpu_stack_offset_shift + is_fpu_stack_offset_bits 1.256 + , is_xmm_shift = virtual_shift + virtual_bits 1.257 + , data_shift = is_xmm_shift + is_xmm_bits 1.258 + , reg1_shift = data_shift 1.259 + , reg2_shift = data_shift + reg_bits 1.260 + 1.261 + }; 1.262 + 1.263 + enum OprSize { 1.264 + single_size = 0 << size_shift 1.265 + , double_size = 1 << size_shift 1.266 + }; 1.267 + 1.268 + enum OprMask { 1.269 + kind_mask = right_n_bits(kind_bits) 1.270 + , type_mask = right_n_bits(type_bits) << type_shift 1.271 + , size_mask = right_n_bits(size_bits) << size_shift 1.272 + , last_use_mask = right_n_bits(last_use_bits) << last_use_shift 1.273 + , is_fpu_stack_offset_mask = right_n_bits(is_fpu_stack_offset_bits) << is_fpu_stack_offset_shift 1.274 + , virtual_mask = right_n_bits(virtual_bits) << virtual_shift 1.275 + , is_xmm_mask = right_n_bits(is_xmm_bits) << is_xmm_shift 1.276 + , pointer_mask = right_n_bits(pointer_bits) 1.277 + , lower_reg_mask = right_n_bits(reg_bits) 1.278 + , no_type_mask = (int)(~(type_mask | last_use_mask | is_fpu_stack_offset_mask)) 1.279 + }; 1.280 + 1.281 + uintptr_t data() const { return value() >> data_shift; } 1.282 + int lo_reg_half() const { return data() & lower_reg_mask; } 1.283 + int hi_reg_half() const { return (data() >> reg_bits) & lower_reg_mask; } 1.284 + OprKind kind_field() const { return (OprKind)(value() & kind_mask); } 1.285 + OprSize size_field() const { return (OprSize)(value() & size_mask); } 1.286 + 1.287 + static char type_char(BasicType t); 1.288 + 1.289 + public: 1.290 + enum { 1.291 + vreg_base = ConcreteRegisterImpl::number_of_registers, 1.292 + vreg_max = (1 << data_bits) - 1 1.293 + }; 1.294 + 1.295 + static 
inline LIR_Opr illegalOpr(); 1.296 + 1.297 + enum OprType { 1.298 + unknown_type = 0 << type_shift // means: not set (catch uninitialized types) 1.299 + , int_type = 1 << type_shift 1.300 + , long_type = 2 << type_shift 1.301 + , object_type = 3 << type_shift 1.302 + , address_type = 4 << type_shift 1.303 + , float_type = 5 << type_shift 1.304 + , double_type = 6 << type_shift 1.305 + , metadata_type = 7 << type_shift 1.306 + }; 1.307 + friend OprType as_OprType(BasicType t); 1.308 + friend BasicType as_BasicType(OprType t); 1.309 + 1.310 + OprType type_field_valid() const { assert(is_register() || is_stack(), "should not be called otherwise"); return (OprType)(value() & type_mask); } 1.311 + OprType type_field() const { return is_illegal() ? unknown_type : (OprType)(value() & type_mask); } 1.312 + 1.313 + static OprSize size_for(BasicType t) { 1.314 + switch (t) { 1.315 + case T_LONG: 1.316 + case T_DOUBLE: 1.317 + return double_size; 1.318 + break; 1.319 + 1.320 + case T_FLOAT: 1.321 + case T_BOOLEAN: 1.322 + case T_CHAR: 1.323 + case T_BYTE: 1.324 + case T_SHORT: 1.325 + case T_INT: 1.326 + case T_ADDRESS: 1.327 + case T_OBJECT: 1.328 + case T_ARRAY: 1.329 + case T_METADATA: 1.330 + return single_size; 1.331 + break; 1.332 + 1.333 + default: 1.334 + ShouldNotReachHere(); 1.335 + return single_size; 1.336 + } 1.337 + } 1.338 + 1.339 + 1.340 + void validate_type() const PRODUCT_RETURN; 1.341 + 1.342 + BasicType type() const { 1.343 + if (is_pointer()) { 1.344 + return pointer()->type(); 1.345 + } 1.346 + return as_BasicType(type_field()); 1.347 + } 1.348 + 1.349 + 1.350 + ValueType* value_type() const { return as_ValueType(type()); } 1.351 + 1.352 + char type_char() const { return type_char((is_pointer()) ? 
pointer()->type() : type()); } 1.353 + 1.354 + bool is_equal(LIR_Opr opr) const { return this == opr; } 1.355 + // checks whether types are same 1.356 + bool is_same_type(LIR_Opr opr) const { 1.357 + assert(type_field() != unknown_type && 1.358 + opr->type_field() != unknown_type, "shouldn't see unknown_type"); 1.359 + return type_field() == opr->type_field(); 1.360 + } 1.361 + bool is_same_register(LIR_Opr opr) { 1.362 + return (is_register() && opr->is_register() && 1.363 + kind_field() == opr->kind_field() && 1.364 + (value() & no_type_mask) == (opr->value() & no_type_mask)); 1.365 + } 1.366 + 1.367 + bool is_pointer() const { return check_value_mask(pointer_mask, pointer_value); } 1.368 + bool is_illegal() const { return kind_field() == illegal_value; } 1.369 + bool is_valid() const { return kind_field() != illegal_value; } 1.370 + 1.371 + bool is_register() const { return is_cpu_register() || is_fpu_register(); } 1.372 + bool is_virtual() const { return is_virtual_cpu() || is_virtual_fpu(); } 1.373 + 1.374 + bool is_constant() const { return is_pointer() && pointer()->as_constant() != NULL; } 1.375 + bool is_address() const { return is_pointer() && pointer()->as_address() != NULL; } 1.376 + 1.377 + bool is_float_kind() const { return is_pointer() ? 
pointer()->is_float_kind() : (kind_field() == fpu_register); } 1.378 + bool is_oop() const; 1.379 + 1.380 + // semantic for fpu- and xmm-registers: 1.381 + // * is_float and is_double return true for xmm_registers 1.382 + // (so is_single_fpu and is_single_xmm are true) 1.383 + // * So you must always check for is_???_xmm prior to is_???_fpu to 1.384 + // distinguish between fpu- and xmm-registers 1.385 + 1.386 + bool is_stack() const { validate_type(); return check_value_mask(kind_mask, stack_value); } 1.387 + bool is_single_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask, stack_value | single_size); } 1.388 + bool is_double_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask, stack_value | double_size); } 1.389 + 1.390 + bool is_cpu_register() const { validate_type(); return check_value_mask(kind_mask, cpu_register); } 1.391 + bool is_virtual_cpu() const { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register | virtual_mask); } 1.392 + bool is_fixed_cpu() const { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register); } 1.393 + bool is_single_cpu() const { validate_type(); return check_value_mask(kind_mask | size_mask, cpu_register | single_size); } 1.394 + bool is_double_cpu() const { validate_type(); return check_value_mask(kind_mask | size_mask, cpu_register | double_size); } 1.395 + 1.396 + bool is_fpu_register() const { validate_type(); return check_value_mask(kind_mask, fpu_register); } 1.397 + bool is_virtual_fpu() const { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register | virtual_mask); } 1.398 + bool is_fixed_fpu() const { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register); } 1.399 + bool is_single_fpu() const { validate_type(); return check_value_mask(kind_mask | size_mask, fpu_register | single_size); } 1.400 + bool is_double_fpu() const { validate_type(); return 
check_value_mask(kind_mask | size_mask, fpu_register | double_size); } 1.401 + 1.402 + bool is_xmm_register() const { validate_type(); return check_value_mask(kind_mask | is_xmm_mask, fpu_register | is_xmm_mask); } 1.403 + bool is_single_xmm() const { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | single_size | is_xmm_mask); } 1.404 + bool is_double_xmm() const { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | double_size | is_xmm_mask); } 1.405 + 1.406 + // fast accessor functions for special bits that do not work for pointers 1.407 + // (in this functions, the check for is_pointer() is omitted) 1.408 + bool is_single_word() const { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, single_size); } 1.409 + bool is_double_word() const { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, double_size); } 1.410 + bool is_virtual_register() const { assert(is_register(), "type check"); return check_value_mask(virtual_mask, virtual_mask); } 1.411 + bool is_oop_register() const { assert(is_register() || is_stack(), "type check"); return type_field_valid() == object_type; } 1.412 + BasicType type_register() const { assert(is_register() || is_stack(), "type check"); return as_BasicType(type_field_valid()); } 1.413 + 1.414 + bool is_last_use() const { assert(is_register(), "only works for registers"); return (value() & last_use_mask) != 0; } 1.415 + bool is_fpu_stack_offset() const { assert(is_register(), "only works for registers"); return (value() & is_fpu_stack_offset_mask) != 0; } 1.416 + LIR_Opr make_last_use() { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | last_use_mask); } 1.417 + LIR_Opr make_fpu_stack_offset() { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | is_fpu_stack_offset_mask); } 1.418 + 1.419 + 1.420 + int single_stack_ix() const { 
assert(is_single_stack() && !is_virtual(), "type check"); return (int)data(); } 1.421 + int double_stack_ix() const { assert(is_double_stack() && !is_virtual(), "type check"); return (int)data(); } 1.422 + RegNr cpu_regnr() const { assert(is_single_cpu() && !is_virtual(), "type check"); return (RegNr)data(); } 1.423 + RegNr cpu_regnrLo() const { assert(is_double_cpu() && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); } 1.424 + RegNr cpu_regnrHi() const { assert(is_double_cpu() && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); } 1.425 + RegNr fpu_regnr() const { assert(is_single_fpu() && !is_virtual(), "type check"); return (RegNr)data(); } 1.426 + RegNr fpu_regnrLo() const { assert(is_double_fpu() && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); } 1.427 + RegNr fpu_regnrHi() const { assert(is_double_fpu() && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); } 1.428 + RegNr xmm_regnr() const { assert(is_single_xmm() && !is_virtual(), "type check"); return (RegNr)data(); } 1.429 + RegNr xmm_regnrLo() const { assert(is_double_xmm() && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); } 1.430 + RegNr xmm_regnrHi() const { assert(is_double_xmm() && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); } 1.431 + int vreg_number() const { assert(is_virtual(), "type check"); return (RegNr)data(); } 1.432 + 1.433 + LIR_OprPtr* pointer() const { assert(is_pointer(), "type check"); return (LIR_OprPtr*)this; } 1.434 + LIR_Const* as_constant_ptr() const { return pointer()->as_constant(); } 1.435 + LIR_Address* as_address_ptr() const { return pointer()->as_address(); } 1.436 + 1.437 + Register as_register() const; 1.438 + Register as_register_lo() const; 1.439 + Register as_register_hi() const; 1.440 + 1.441 + Register as_pointer_register() { 1.442 +#ifdef _LP64 1.443 + if (is_double_cpu()) { 1.444 + assert(as_register_lo() == as_register_hi(), "should be a single register"); 1.445 + return as_register_lo(); 1.446 + } 
1.447 +#endif 1.448 + return as_register(); 1.449 + } 1.450 + 1.451 +#ifdef X86 1.452 + XMMRegister as_xmm_float_reg() const; 1.453 + XMMRegister as_xmm_double_reg() const; 1.454 + // for compatibility with RInfo 1.455 + int fpu () const { return lo_reg_half(); } 1.456 +#endif // X86 1.457 +#if defined(SPARC) || defined(ARM) || defined(PPC) 1.458 + FloatRegister as_float_reg () const; 1.459 + FloatRegister as_double_reg () const; 1.460 +#endif 1.461 + 1.462 + jint as_jint() const { return as_constant_ptr()->as_jint(); } 1.463 + jlong as_jlong() const { return as_constant_ptr()->as_jlong(); } 1.464 + jfloat as_jfloat() const { return as_constant_ptr()->as_jfloat(); } 1.465 + jdouble as_jdouble() const { return as_constant_ptr()->as_jdouble(); } 1.466 + jobject as_jobject() const { return as_constant_ptr()->as_jobject(); } 1.467 + 1.468 + void print() const PRODUCT_RETURN; 1.469 + void print(outputStream* out) const PRODUCT_RETURN; 1.470 +}; 1.471 + 1.472 + 1.473 +inline LIR_OprDesc::OprType as_OprType(BasicType type) { 1.474 + switch (type) { 1.475 + case T_INT: return LIR_OprDesc::int_type; 1.476 + case T_LONG: return LIR_OprDesc::long_type; 1.477 + case T_FLOAT: return LIR_OprDesc::float_type; 1.478 + case T_DOUBLE: return LIR_OprDesc::double_type; 1.479 + case T_OBJECT: 1.480 + case T_ARRAY: return LIR_OprDesc::object_type; 1.481 + case T_ADDRESS: return LIR_OprDesc::address_type; 1.482 + case T_METADATA: return LIR_OprDesc::metadata_type; 1.483 + case T_ILLEGAL: // fall through 1.484 + default: ShouldNotReachHere(); return LIR_OprDesc::unknown_type; 1.485 + } 1.486 +} 1.487 + 1.488 +inline BasicType as_BasicType(LIR_OprDesc::OprType t) { 1.489 + switch (t) { 1.490 + case LIR_OprDesc::int_type: return T_INT; 1.491 + case LIR_OprDesc::long_type: return T_LONG; 1.492 + case LIR_OprDesc::float_type: return T_FLOAT; 1.493 + case LIR_OprDesc::double_type: return T_DOUBLE; 1.494 + case LIR_OprDesc::object_type: return T_OBJECT; 1.495 + case LIR_OprDesc::address_type: 
return T_ADDRESS; 1.496 + case LIR_OprDesc::metadata_type:return T_METADATA; 1.497 + case LIR_OprDesc::unknown_type: // fall through 1.498 + default: ShouldNotReachHere(); return T_ILLEGAL; 1.499 + } 1.500 +} 1.501 + 1.502 + 1.503 +// LIR_Address 1.504 +class LIR_Address: public LIR_OprPtr { 1.505 + friend class LIR_OpVisitState; 1.506 + 1.507 + public: 1.508 + // NOTE: currently these must be the log2 of the scale factor (and 1.509 + // must also be equivalent to the ScaleFactor enum in 1.510 + // assembler_i486.hpp) 1.511 + enum Scale { 1.512 + times_1 = 0, 1.513 + times_2 = 1, 1.514 + times_4 = 2, 1.515 + times_8 = 3 1.516 + }; 1.517 + 1.518 + private: 1.519 + LIR_Opr _base; 1.520 + LIR_Opr _index; 1.521 + Scale _scale; 1.522 + intx _disp; 1.523 + BasicType _type; 1.524 + 1.525 + public: 1.526 + LIR_Address(LIR_Opr base, LIR_Opr index, BasicType type): 1.527 + _base(base) 1.528 + , _index(index) 1.529 + , _scale(times_1) 1.530 + , _type(type) 1.531 + , _disp(0) { verify(); } 1.532 + 1.533 + LIR_Address(LIR_Opr base, intx disp, BasicType type): 1.534 + _base(base) 1.535 + , _index(LIR_OprDesc::illegalOpr()) 1.536 + , _scale(times_1) 1.537 + , _type(type) 1.538 + , _disp(disp) { verify(); } 1.539 + 1.540 + LIR_Address(LIR_Opr base, BasicType type): 1.541 + _base(base) 1.542 + , _index(LIR_OprDesc::illegalOpr()) 1.543 + , _scale(times_1) 1.544 + , _type(type) 1.545 + , _disp(0) { verify(); } 1.546 + 1.547 +#if defined(X86) || defined(ARM) 1.548 + LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type): 1.549 + _base(base) 1.550 + , _index(index) 1.551 + , _scale(scale) 1.552 + , _type(type) 1.553 + , _disp(disp) { verify(); } 1.554 +#endif // X86 || ARM 1.555 + 1.556 + LIR_Opr base() const { return _base; } 1.557 + LIR_Opr index() const { return _index; } 1.558 + Scale scale() const { return _scale; } 1.559 + intx disp() const { return _disp; } 1.560 + 1.561 + bool equals(LIR_Address* other) const { return base() == other->base() && index() 
== other->index() && disp() == other->disp() && scale() == other->scale(); } 1.562 + 1.563 + virtual LIR_Address* as_address() { return this; } 1.564 + virtual BasicType type() const { return _type; } 1.565 + virtual void print_value_on(outputStream* out) const PRODUCT_RETURN; 1.566 + 1.567 + void verify() const PRODUCT_RETURN; 1.568 + 1.569 + static Scale scale(BasicType type); 1.570 +}; 1.571 + 1.572 + 1.573 +// operand factory 1.574 +class LIR_OprFact: public AllStatic { 1.575 + public: 1.576 + 1.577 + static LIR_Opr illegalOpr; 1.578 + 1.579 + static LIR_Opr single_cpu(int reg) { 1.580 + return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | 1.581 + LIR_OprDesc::int_type | 1.582 + LIR_OprDesc::cpu_register | 1.583 + LIR_OprDesc::single_size); 1.584 + } 1.585 + static LIR_Opr single_cpu_oop(int reg) { 1.586 + return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | 1.587 + LIR_OprDesc::object_type | 1.588 + LIR_OprDesc::cpu_register | 1.589 + LIR_OprDesc::single_size); 1.590 + } 1.591 + static LIR_Opr single_cpu_address(int reg) { 1.592 + return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | 1.593 + LIR_OprDesc::address_type | 1.594 + LIR_OprDesc::cpu_register | 1.595 + LIR_OprDesc::single_size); 1.596 + } 1.597 + static LIR_Opr single_cpu_metadata(int reg) { 1.598 + return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | 1.599 + LIR_OprDesc::metadata_type | 1.600 + LIR_OprDesc::cpu_register | 1.601 + LIR_OprDesc::single_size); 1.602 + } 1.603 + static LIR_Opr double_cpu(int reg1, int reg2) { 1.604 + LP64_ONLY(assert(reg1 == reg2, "must be identical")); 1.605 + return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) | 1.606 + (reg2 << LIR_OprDesc::reg2_shift) | 1.607 + LIR_OprDesc::long_type | 1.608 + LIR_OprDesc::cpu_register | 1.609 + LIR_OprDesc::double_size); 1.610 + } 1.611 + 1.612 + static LIR_Opr single_fpu(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | 1.613 + LIR_OprDesc::float_type | 1.614 + 
LIR_OprDesc::fpu_register | 1.615 + LIR_OprDesc::single_size); } 1.616 +#if defined(ARM) 1.617 + static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); } 1.618 + static LIR_Opr single_softfp(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::float_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); } 1.619 + static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size); } 1.620 +#endif 1.621 +#ifdef SPARC 1.622 + static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) | 1.623 + (reg2 << LIR_OprDesc::reg2_shift) | 1.624 + LIR_OprDesc::double_type | 1.625 + LIR_OprDesc::fpu_register | 1.626 + LIR_OprDesc::double_size); } 1.627 +#endif 1.628 +#ifdef X86 1.629 + static LIR_Opr double_fpu(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | 1.630 + (reg << LIR_OprDesc::reg2_shift) | 1.631 + LIR_OprDesc::double_type | 1.632 + LIR_OprDesc::fpu_register | 1.633 + LIR_OprDesc::double_size); } 1.634 + 1.635 + static LIR_Opr single_xmm(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | 1.636 + LIR_OprDesc::float_type | 1.637 + LIR_OprDesc::fpu_register | 1.638 + LIR_OprDesc::single_size | 1.639 + LIR_OprDesc::is_xmm_mask); } 1.640 + static LIR_Opr double_xmm(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | 1.641 + (reg << LIR_OprDesc::reg2_shift) | 1.642 + LIR_OprDesc::double_type | 1.643 + LIR_OprDesc::fpu_register | 1.644 + LIR_OprDesc::double_size | 1.645 + LIR_OprDesc::is_xmm_mask); } 1.646 +#endif // X86 1.647 +#ifdef PPC 1.648 + static LIR_Opr double_fpu(int reg) { return (LIR_Opr)(intptr_t)((reg << 
                                                             LIR_OprDesc::reg1_shift) |
                                                             (reg << LIR_OprDesc::reg2_shift) |
                                                             LIR_OprDesc::double_type |
                                                             LIR_OprDesc::fpu_register |
                                                             LIR_OprDesc::double_size); }
  // Soft-float configuration: float values are kept in CPU (integer) registers.
  static LIR_Opr single_softfp(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) |
                                                           LIR_OprDesc::float_type |
                                                           LIR_OprDesc::cpu_register |
                                                           LIR_OprDesc::single_size); }
  // NOTE(review): reg2 goes into the reg1 field and reg1 into the reg2 field --
  // presumably this matches the platform's word order for register pairs;
  // confirm against the platform FrameMap before changing.
  static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg2 << LIR_OprDesc::reg1_shift) |
                                                                      (reg1 << LIR_OprDesc::reg2_shift) |
                                                                      LIR_OprDesc::double_type |
                                                                      LIR_OprDesc::cpu_register |
                                                                      LIR_OprDesc::double_size); }
#endif // PPC

  // Creates a virtual-register operand for the register allocator.  The
  // operand word encodes the virtual register number (data field), the
  // value type, the register kind (cpu vs. fpu) and the operand size.
  static LIR_Opr virtual_register(int index, BasicType type) {
    LIR_Opr res;
    switch (type) {
      case T_OBJECT: // fall through
      case T_ARRAY:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
                                            LIR_OprDesc::object_type  |
                                            LIR_OprDesc::cpu_register |
                                            LIR_OprDesc::single_size  |
                                            LIR_OprDesc::virtual_mask);
        break;

      case T_METADATA:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
                                            LIR_OprDesc::metadata_type|
                                            LIR_OprDesc::cpu_register |
                                            LIR_OprDesc::single_size  |
                                            LIR_OprDesc::virtual_mask);
        break;

      case T_INT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
                                            LIR_OprDesc::int_type     |
                                            LIR_OprDesc::cpu_register |
                                            LIR_OprDesc::single_size  |
                                            LIR_OprDesc::virtual_mask);
        break;

      case T_ADDRESS:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
                                            LIR_OprDesc::address_type |
                                            LIR_OprDesc::cpu_register |
                                            LIR_OprDesc::single_size  |
                                            LIR_OprDesc::virtual_mask);
        break;

      case T_LONG:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
                                            LIR_OprDesc::long_type    |
                                            LIR_OprDesc::cpu_register |
                                            LIR_OprDesc::double_size  |
                                            LIR_OprDesc::virtual_mask);
        break;

#ifdef __SOFTFP__
      // Soft-float: floating-point virtual registers live in CPU registers.
      case T_FLOAT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
                                            LIR_OprDesc::float_type   |
                                            LIR_OprDesc::cpu_register |
                                            LIR_OprDesc::single_size  |
                                            LIR_OprDesc::virtual_mask);
        break;
      case T_DOUBLE:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
                                            LIR_OprDesc::double_type  |
                                            LIR_OprDesc::cpu_register |
                                            LIR_OprDesc::double_size  |
                                            LIR_OprDesc::virtual_mask);
        break;
#else // __SOFTFP__
      case T_FLOAT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
                                            LIR_OprDesc::float_type   |
                                            LIR_OprDesc::fpu_register |
                                            LIR_OprDesc::single_size  |
                                            LIR_OprDesc::virtual_mask);
        break;

      case T_DOUBLE:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
                                            LIR_OprDesc::double_type  |
                                            LIR_OprDesc::fpu_register |
                                            LIR_OprDesc::double_size  |
                                            LIR_OprDesc::virtual_mask);
        break;
#endif // __SOFTFP__
      default:       ShouldNotReachHere(); res = illegalOpr;
    }

#ifdef ASSERT
    res->validate_type();
    assert(res->vreg_number() == index, "conversion check");
    assert(index >= LIR_OprDesc::vreg_base, "must start at vreg_base");
    assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");

    // old-style calculation; check if old and new method are equal
    LIR_OprDesc::OprType t = as_OprType(type);
#ifdef __SOFTFP__
    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                          t |
                                          LIR_OprDesc::cpu_register |
                                          LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
#else // __SOFTFP__
    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | t |
                                          ((type == T_FLOAT || type == T_DOUBLE) ? LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) |
                                          LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
    assert(res == old_res, "old and new method not equal");
#endif // __SOFTFP__
#endif // ASSERT

    return res;
  }

  // 'index' is computed by FrameMap::local_stack_pos(index); do not use other parameters as
  // the index is platform independent; a double stack using indices 2 and 3 has always
  // index 2.
  static LIR_Opr stack(int index, BasicType type) {
    LIR_Opr res;
    switch (type) {
      case T_OBJECT: // fall through
      case T_ARRAY:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                            LIR_OprDesc::object_type |
                                            LIR_OprDesc::stack_value |
                                            LIR_OprDesc::single_size);
        break;

      case T_METADATA:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)   |
                                            LIR_OprDesc::metadata_type |
                                            LIR_OprDesc::stack_value   |
                                            LIR_OprDesc::single_size);
        break;
      case T_INT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                            LIR_OprDesc::int_type    |
                                            LIR_OprDesc::stack_value |
                                            LIR_OprDesc::single_size);
        break;

      case T_ADDRESS:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
                                            LIR_OprDesc::address_type |
                                            LIR_OprDesc::stack_value  |
                                            LIR_OprDesc::single_size);
        break;

      case T_LONG:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                            LIR_OprDesc::long_type   |
                                            LIR_OprDesc::stack_value |
                                            LIR_OprDesc::double_size);
        break;

      case T_FLOAT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                            LIR_OprDesc::float_type  |
                                            LIR_OprDesc::stack_value |
                                            LIR_OprDesc::single_size);
        break;
      case T_DOUBLE:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                            LIR_OprDesc::double_type |
                                            LIR_OprDesc::stack_value |
                                            LIR_OprDesc::double_size);
        break;

      default:       ShouldNotReachHere(); res = illegalOpr;
    }

#ifdef ASSERT
    assert(index >= 0, "index must be positive");
    assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");

    // cross-check against the old-style (table-driven) encoding
    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                          LIR_OprDesc::stack_value           |
                                          as_OprType(type)                   |
                                          LIR_OprDesc::size_for(type));
    assert(res == old_res, "old and new method not equal");
#endif

    return res;
  }

  // Constant operands: the pointer itself is the operand (tagged via the
  // low bits being clear, see LIR_OprDesc); illegal() uses the all-ones bit
  // pattern, which no valid operand can have.
  static LIR_Opr intConst(jint i)                { return (LIR_Opr)(new LIR_Const(i)); }
  static LIR_Opr longConst(jlong l)              { return (LIR_Opr)(new LIR_Const(l)); }
  static LIR_Opr floatConst(jfloat f)            { return (LIR_Opr)(new LIR_Const(f)); }
  static LIR_Opr doubleConst(jdouble d)          { return (LIR_Opr)(new LIR_Const(d)); }
  static LIR_Opr oopConst(jobject o)             { return (LIR_Opr)(new LIR_Const(o)); }
  static LIR_Opr address(LIR_Address* a)         { return (LIR_Opr)a; }
  static LIR_Opr intptrConst(void* p)            { return (LIR_Opr)(new LIR_Const(p)); }
  static LIR_Opr intptrConst(intptr_t v)         { return (LIR_Opr)(new LIR_Const((void*)v)); }
  static LIR_Opr illegal()                       { return (LIR_Opr)-1; }
  static LIR_Opr addressConst(jint i)            { return (LIR_Opr)(new LIR_Const(i, true)); }
  static LIR_Opr metadataConst(Metadata* m)      { return (LIR_Opr)(new LIR_Const(m)); }

  static LIR_Opr value_type(ValueType* type);
  static LIR_Opr dummy_value_type(ValueType* type);
};


//-------------------------------------------------------------------------------
// LIR Instructions
//-------------------------------------------------------------------------------
//
// Note:
// - every instruction has a result operand
// - every instruction has a CodeEmitInfo operand (can be revisited later)
// - every instruction has a LIR_OpCode operand
// - LIR_OpN means an instruction that has N input operands
//
// class hierarchy:
//
class  LIR_Op;
class    LIR_Op0;
class      LIR_OpLabel;
class    LIR_Op1;
class      LIR_OpBranch;
class      LIR_OpConvert;
class      LIR_OpAllocObj;
class      LIR_OpRoundFP;
class    LIR_Op2;
class    LIR_OpDelay;
class    LIR_Op3;
class      LIR_OpAllocArray;
class    LIR_OpCall;
class      LIR_OpJavaCall;
class      LIR_OpRTCall;
class    LIR_OpArrayCopy;
class    LIR_OpUpdateCRC32;
class    LIR_OpLock;
class    LIR_OpTypeCheck;
class    LIR_OpCompareAndSwap;
class    LIR_OpProfileCall;
class    LIR_OpProfileType;
#ifdef ASSERT
class    LIR_OpAssert;
#endif

// LIR operation codes.
// The begin_*/end_* entries are sentinels: LIR_Op::is_in_range(code, begin, end)
// tests begin < code < end, so the relative order of the opcodes in each group
// (and of the groups themselves) must not be changed.
enum LIR_Code {
    lir_none
  , begin_op0
      , lir_word_align
      , lir_label
      , lir_nop
      , lir_backwardbranch_target
      , lir_std_entry
      , lir_osr_entry
      , lir_build_frame
      , lir_fpop_raw
      , lir_24bit_FPU
      , lir_reset_FPU
      , lir_breakpoint
      , lir_rtcall
      , lir_membar
      , lir_membar_acquire
      , lir_membar_release
      , lir_membar_loadload
      , lir_membar_storestore
      , lir_membar_loadstore
      , lir_membar_storeload
      , lir_get_thread
  , end_op0
  , begin_op1
      , lir_fxch
      , lir_fld
      , lir_ffree
      , lir_push
      , lir_pop
      , lir_null_check
      , lir_return
      , lir_leal
      , lir_neg
      , lir_branch
      , lir_cond_float_branch
      , lir_move
      , lir_prefetchr
      , lir_prefetchw
      , lir_convert
      , lir_alloc_object
      , lir_monaddr
      , lir_roundfp
      , lir_safepoint
      , lir_pack64
      , lir_unpack64
      , lir_unwind
  , end_op1
  , begin_op2
      , lir_cmp
      , lir_cmp_l2i
      , lir_ucmp_fd2i
      , lir_cmp_fd2i
      , lir_cmove
      , lir_add
      , lir_sub
      , lir_mul
      , lir_mul_strictfp
      , lir_div
      , lir_div_strictfp
      , lir_rem
      , lir_sqrt
      , lir_abs
      , lir_sin
      , lir_cos
      , lir_tan
      , lir_log
      , lir_log10
      , lir_exp
      , lir_pow
      , lir_logic_and
      , lir_logic_or
      , lir_logic_xor
      , lir_shl
      , lir_shr
      , lir_ushr
      , lir_alloc_array
      , lir_throw
      , lir_compare_to
      , lir_xadd
      , lir_xchg
  , end_op2
  , begin_op3
      , lir_idiv
      , lir_irem
  , end_op3
  , begin_opJavaCall
      , lir_static_call
      , lir_optvirtual_call
      , lir_icvirtual_call
      , lir_virtual_call
      , lir_dynamic_call
  , end_opJavaCall
  , begin_opArrayCopy
      , lir_arraycopy
  , end_opArrayCopy
  , begin_opUpdateCRC32
      , lir_updatecrc32
  , end_opUpdateCRC32
  , begin_opLock
      , lir_lock
      , lir_unlock
  , end_opLock
  , begin_delay_slot
      , lir_delay_slot
  , end_delay_slot
  , begin_opTypeCheck
      , lir_instanceof
      , lir_checkcast
      , lir_store_check
  , end_opTypeCheck
  , begin_opCompareAndSwap
      , lir_cas_long
      , lir_cas_obj
      , lir_cas_int
  , end_opCompareAndSwap
  , begin_opMDOProfile
      , lir_profile_call
      , lir_profile_type
  , end_opMDOProfile
  , begin_opAssert
      , lir_assert
  , end_opAssert
};
// Branch/cmove conditions; lir_cond_unknown marks "no condition set".
enum LIR_Condition {
    lir_cond_equal
  , lir_cond_notEqual
  , lir_cond_less
  , lir_cond_lessEqual
  , lir_cond_greaterEqual
  , lir_cond_greater
  , lir_cond_belowEqual        // unsigned comparison
  , lir_cond_aboveEqual        // unsigned comparison
  , lir_cond_always
  , lir_cond_unknown = -1
};


// Which part of an operand needs run-time patching.
enum LIR_PatchCode {
  lir_patch_none,
  lir_patch_low,
  lir_patch_high,
  lir_patch_normal
};


// Flavor of a lir_move; stored in LIR_Op::_flags (see LIR_Op1::set_kind).
enum LIR_MoveKind {
  lir_move_normal,
  lir_move_volatile,
  lir_move_unaligned,
  lir_move_wide,
  lir_move_max_flag
};


// --------------------------------------------------
// LIR_Op
// --------------------------------------------------
// Base class of all LIR instructions: carries the opcode, the (possibly
// illegal) result operand, optional debug info, and bookkeeping fields
// used by the register allocator and the FPU stack simulation.
class LIR_Op: public CompilationResourceObj {
 friend class LIR_OpVisitState;

#ifdef ASSERT
  private:
  const char *  _file;   // source position of the LIR creation site, for debugging
  int           _line;
#endif

 protected:
  LIR_Opr       _result;        // the result operand for this instruction
  unsigned short _code;         // the opcode (a LIR_Code value, stored narrow)
  unsigned short _flags;        // used e.g. to hold the LIR_MoveKind for moves
  CodeEmitInfo* _info;          // used for debug information
  int           _id;            // value id for register allocation
  int           _fpu_pop_count; // FPU stack simulation (Intel only)
  Instruction*  _source;        // for debugging

  static void print_condition(outputStream* out, LIR_Condition cond) PRODUCT_RETURN;

 protected:
  // Strict containment: relies on the begin_*/end_* sentinel ordering in LIR_Code.
  static bool is_in_range(LIR_Code test, LIR_Code start, LIR_Code end)  { return start < test && test < end; }

 public:
  // NOTE(review): the mem-initializer lists below do not follow member
  // declaration order (e.g. _id is listed last but declared before
  // _fpu_pop_count); members are nevertheless initialized in declaration
  // order, so behavior is unaffected -- only -Wreorder fires.
  LIR_Op()
    : _result(LIR_OprFact::illegalOpr)
    , _code(lir_none)
    , _flags(0)
    , _info(NULL)
#ifdef ASSERT
    , _file(NULL)
    , _line(0)
#endif
    , _fpu_pop_count(0)
    , _source(NULL)
    , _id(-1)                             {}

  LIR_Op(LIR_Code code, LIR_Opr result, CodeEmitInfo* info)
    : _result(result)
    , _code(code)
    , _flags(0)
    , _info(info)
#ifdef ASSERT
    , _file(NULL)
    , _line(0)
#endif
    , _fpu_pop_count(0)
    , _source(NULL)
    , _id(-1)                             {}

  CodeEmitInfo* info() const                  { return _info;   }
  LIR_Code code()      const                  { return (LIR_Code)_code;   }
  LIR_Opr result_opr() const                  { return _result; }
  void    set_result_opr(LIR_Opr opr)         { _result = opr;  }

#ifdef ASSERT
  void set_file_and_line(const char * file, int line) {
    _file = file;
    _line = line;
  }
#endif

  virtual const char * name() const PRODUCT_RETURN0;

  int id()             const                  { return _id;     }
  void set_id(int id)                         { _id = id; }

  // FPU stack simulation helpers -- only used on Intel
  void set_fpu_pop_count(int count)           { assert(count >= 0 && count <= 1, "currently only 0 and 1 are valid"); _fpu_pop_count = count; }
  int  fpu_pop_count() const                  { return _fpu_pop_count; }
  bool pop_fpu_stack()                        { return _fpu_pop_count > 0; }

  Instruction* source() const                 { return _source; }
  void set_source(Instruction* ins)           { _source = ins; }

  virtual void emit_code(LIR_Assembler* masm) = 0;
  virtual void print_instr(outputStream* out) const = 0;
  virtual void print_on(outputStream* st) const PRODUCT_RETURN;

  // Downcast helpers: each subclass overrides its own as_XXX to return this.
  virtual bool is_patching() { return false; }
  virtual LIR_OpCall* as_OpCall() { return NULL; }
  virtual LIR_OpJavaCall* as_OpJavaCall() { return NULL; }
  virtual LIR_OpLabel* as_OpLabel() { return NULL; }
  virtual LIR_OpDelay* as_OpDelay() { return NULL; }
  virtual LIR_OpLock* as_OpLock() { return NULL; }
  virtual LIR_OpAllocArray* as_OpAllocArray() { return NULL; }
  virtual LIR_OpAllocObj* as_OpAllocObj() { return NULL; }
  virtual LIR_OpRoundFP* as_OpRoundFP() { return NULL; }
  virtual LIR_OpBranch* as_OpBranch() { return NULL; }
  virtual LIR_OpRTCall* as_OpRTCall() { return NULL; }
  virtual LIR_OpConvert* as_OpConvert() { return NULL; }
  virtual LIR_Op0* as_Op0() { return NULL; }
  virtual LIR_Op1* as_Op1() { return NULL; }
  virtual LIR_Op2* as_Op2() { return NULL; }
  virtual LIR_Op3* as_Op3() { return NULL; }
  virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; }
  virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return NULL; }
  virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
  virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
  virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
  virtual LIR_OpProfileType* as_OpProfileType() { return NULL; }
#ifdef ASSERT
  virtual LIR_OpAssert* as_OpAssert() { return NULL; }
#endif

  virtual void verify() const {}
};

// for calls
class LIR_OpCall: public LIR_Op {
 friend class LIR_OpVisitState;

 protected:
  address      _addr;       // call target (or vtable offset for virtual calls, see LIR_OpJavaCall)
  LIR_OprList* _arguments;
 protected:
  // NOTE(review): initializer list order (_arguments before _addr) differs
  // from declaration order; harmless, see note on LIR_Op above.
  LIR_OpCall(LIR_Code code, address addr, LIR_Opr result,
             LIR_OprList* arguments, CodeEmitInfo* info = NULL)
    : LIR_Op(code, result, info)
    , _arguments(arguments)
    , _addr(addr) {}

 public:
  address addr() const                           { return _addr; }
  const LIR_OprList* arguments() const           { return _arguments; }
  virtual LIR_OpCall* as_OpCall()                { return this; }
};


// --------------------------------------------------
// LIR_OpJavaCall
// --------------------------------------------------
class LIR_OpJavaCall: public LIR_OpCall {
 friend class LIR_OpVisitState;

 private:
  ciMethod* _method;
  LIR_Opr   _receiver;
  LIR_Opr   _method_handle_invoke_SP_save_opr;  // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.

 public:
  LIR_OpJavaCall(LIR_Code code, ciMethod* method,
                 LIR_Opr receiver, LIR_Opr result,
                 address addr, LIR_OprList* arguments,
                 CodeEmitInfo* info)
  : LIR_OpCall(code, addr, result, arguments, info)
  , _receiver(receiver)
  , _method(method)
  , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
  { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }

  // Variant for virtual calls: the vtable offset is smuggled through _addr.
  LIR_OpJavaCall(LIR_Code code, ciMethod* method,
                 LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
                 LIR_OprList* arguments, CodeEmitInfo* info)
  : LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
  , _receiver(receiver)
  , _method(method)
  , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
  { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }

  LIR_Opr receiver() const                       { return _receiver; }
  ciMethod* method() const                       { return _method;   }

  // JSR 292 support.
  bool is_invokedynamic() const                  { return code() == lir_dynamic_call; }
  bool is_method_handle_invoke() const {
    return
      method()->is_compiled_lambda_form()  // Java-generated adapter
      ||
      method()->is_method_handle_intrinsic();  // JVM-generated MH intrinsic
  }

  intptr_t vtable_offset() const {
    assert(_code == lir_virtual_call, "only have vtable for real vcall");
    return (intptr_t) addr();
  }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpJavaCall* as_OpJavaCall() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};

// --------------------------------------------------
// LIR_OpLabel
// --------------------------------------------------
// Location where a branch can continue
class LIR_OpLabel: public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  Label* _label;
 public:
  LIR_OpLabel(Label* lbl)
   : LIR_Op(lir_label, LIR_OprFact::illegalOpr, NULL)
   , _label(lbl)                                 {}
  Label* label() const                           { return _label; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpLabel* as_OpLabel() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};

// LIR_OpArrayCopy
class LIR_OpArrayCopy: public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  ArrayCopyStub*  _stub;
  LIR_Opr   _src;
  LIR_Opr   _src_pos;
  LIR_Opr   _dst;
  LIR_Opr   _dst_pos;
  LIR_Opr   _length;
  LIR_Opr   _tmp;
  ciArrayKlass* _expected_type;
  int       _flags;

public:
  // Checks the fast-path copy may still need to perform; a set bit means the
  // corresponding check has NOT been proven unnecessary at compile time.
  enum Flags {
    src_null_check         = 1 << 0,
    dst_null_check         = 1 << 1,
    src_pos_positive_check = 1 << 2,
    dst_pos_positive_check = 1 << 3,
    length_positive_check  = 1 << 4,
    src_range_check        = 1 << 5,
    dst_range_check        = 1 << 6,
    type_check             = 1 << 7,
    overlapping            = 1 << 8,
    unaligned              = 1 << 9,
    src_objarray           = 1 << 10,
    dst_objarray           = 1 << 11,
    all_flags              = (1 << 12) - 1
  };

  LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
                  ciArrayKlass* expected_type, int flags, CodeEmitInfo* info);

  LIR_Opr src() const                            { return _src; }
  LIR_Opr src_pos() const                        { return _src_pos; }
  LIR_Opr dst() const                            { return _dst; }
  LIR_Opr dst_pos() const                        { return _dst_pos; }
  LIR_Opr length() const                         { return _length; }
  LIR_Opr tmp() const                            { return _tmp; }
  int flags() const                              { return _flags; }
  ciArrayKlass* expected_type() const            { return _expected_type; }
  ArrayCopyStub* stub() const                    { return _stub; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpArrayCopy* as_OpArrayCopy() { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
};

// LIR_OpUpdateCRC32
class LIR_OpUpdateCRC32: public LIR_Op {
  friend class LIR_OpVisitState;

private:
  LIR_Opr   _crc;
  LIR_Opr   _val;

public:

  LIR_OpUpdateCRC32(LIR_Opr crc, LIR_Opr val, LIR_Opr res);

  LIR_Opr crc() const                            { return _crc; }
  LIR_Opr val() const                            { return _val; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32()  { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
};

// --------------------------------------------------
// LIR_Op0
// --------------------------------------------------
1.1329 +class LIR_Op0: public LIR_Op { 1.1330 + friend class LIR_OpVisitState; 1.1331 + 1.1332 + public: 1.1333 + LIR_Op0(LIR_Code code) 1.1334 + : LIR_Op(code, LIR_OprFact::illegalOpr, NULL) { assert(is_in_range(code, begin_op0, end_op0), "code check"); } 1.1335 + LIR_Op0(LIR_Code code, LIR_Opr result, CodeEmitInfo* info = NULL) 1.1336 + : LIR_Op(code, result, info) { assert(is_in_range(code, begin_op0, end_op0), "code check"); } 1.1337 + 1.1338 + virtual void emit_code(LIR_Assembler* masm); 1.1339 + virtual LIR_Op0* as_Op0() { return this; } 1.1340 + virtual void print_instr(outputStream* out) const PRODUCT_RETURN; 1.1341 +}; 1.1342 + 1.1343 + 1.1344 +// -------------------------------------------------- 1.1345 +// LIR_Op1 1.1346 +// -------------------------------------------------- 1.1347 + 1.1348 +class LIR_Op1: public LIR_Op { 1.1349 + friend class LIR_OpVisitState; 1.1350 + 1.1351 + protected: 1.1352 + LIR_Opr _opr; // input operand 1.1353 + BasicType _type; // Operand types 1.1354 + LIR_PatchCode _patch; // only required with patchin (NEEDS_CLEANUP: do we want a special instruction for patching?) 
1.1355 + 1.1356 + static void print_patch_code(outputStream* out, LIR_PatchCode code); 1.1357 + 1.1358 + void set_kind(LIR_MoveKind kind) { 1.1359 + assert(code() == lir_move, "must be"); 1.1360 + _flags = kind; 1.1361 + } 1.1362 + 1.1363 + public: 1.1364 + LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result = LIR_OprFact::illegalOpr, BasicType type = T_ILLEGAL, LIR_PatchCode patch = lir_patch_none, CodeEmitInfo* info = NULL) 1.1365 + : LIR_Op(code, result, info) 1.1366 + , _opr(opr) 1.1367 + , _patch(patch) 1.1368 + , _type(type) { assert(is_in_range(code, begin_op1, end_op1), "code check"); } 1.1369 + 1.1370 + LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result, BasicType type, LIR_PatchCode patch, CodeEmitInfo* info, LIR_MoveKind kind) 1.1371 + : LIR_Op(code, result, info) 1.1372 + , _opr(opr) 1.1373 + , _patch(patch) 1.1374 + , _type(type) { 1.1375 + assert(code == lir_move, "must be"); 1.1376 + set_kind(kind); 1.1377 + } 1.1378 + 1.1379 + LIR_Op1(LIR_Code code, LIR_Opr opr, CodeEmitInfo* info) 1.1380 + : LIR_Op(code, LIR_OprFact::illegalOpr, info) 1.1381 + , _opr(opr) 1.1382 + , _patch(lir_patch_none) 1.1383 + , _type(T_ILLEGAL) { assert(is_in_range(code, begin_op1, end_op1), "code check"); } 1.1384 + 1.1385 + LIR_Opr in_opr() const { return _opr; } 1.1386 + LIR_PatchCode patch_code() const { return _patch; } 1.1387 + BasicType type() const { return _type; } 1.1388 + 1.1389 + LIR_MoveKind move_kind() const { 1.1390 + assert(code() == lir_move, "must be"); 1.1391 + return (LIR_MoveKind)_flags; 1.1392 + } 1.1393 + 1.1394 + virtual bool is_patching() { return _patch != lir_patch_none; } 1.1395 + virtual void emit_code(LIR_Assembler* masm); 1.1396 + virtual LIR_Op1* as_Op1() { return this; } 1.1397 + virtual const char * name() const PRODUCT_RETURN0; 1.1398 + 1.1399 + void set_in_opr(LIR_Opr opr) { _opr = opr; } 1.1400 + 1.1401 + virtual void print_instr(outputStream* out) const PRODUCT_RETURN; 1.1402 + virtual void verify() const; 1.1403 +}; 1.1404 + 1.1405 + 
// for runtime calls
class LIR_OpRTCall: public LIR_OpCall {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr _tmp;  // scratch register for materializing the call target
 public:
  LIR_OpRTCall(address addr, LIR_Opr tmp,
               LIR_Opr result, LIR_OprList* arguments, CodeEmitInfo* info = NULL)
    : LIR_OpCall(lir_rtcall, addr, result, arguments, info)
    , _tmp(tmp) {}

  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpRTCall* as_OpRTCall() { return this; }

  LIR_Opr tmp() const                            { return _tmp; }

  virtual void verify() const;
};


// A conditional or unconditional branch: exactly one of _label, _block
// (+ optional _ublock for float branches) or _stub identifies the target.
class LIR_OpBranch: public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  LIR_Condition _cond;
  BasicType     _type;
  Label*        _label;
  BlockBegin*   _block;  // if this is a branch to a block, this is the block
  BlockBegin*   _ublock; // if this is a float-branch, this is the unordered block
  CodeStub*     _stub;   // if this is a branch to a stub, this is the stub

 public:
  LIR_OpBranch(LIR_Condition cond, BasicType type, Label* lbl)
    : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
    , _cond(cond)
    , _type(type)
    , _label(lbl)
    , _block(NULL)
    , _ublock(NULL)
    , _stub(NULL) { }

  LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block);
  LIR_OpBranch(LIR_Condition cond, BasicType type, CodeStub* stub);

  // for unordered comparisons
  LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* ublock);

  LIR_Condition cond() const                     { return _cond;   }
  BasicType     type() const                     { return _type;   }
  Label*        label() const                    { return _label;  }
  BlockBegin*   block() const                    { return _block;  }
  BlockBegin*   ublock() const                   { return _ublock; }
  CodeStub*     stub() const                     { return _stub;   }

  void          change_block(BlockBegin* b);
  void          change_ublock(BlockBegin* b);
  void          negate_cond();

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpBranch* as_OpBranch() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};


class ConversionStub;

// Primitive type conversion (the bytecode identifies which conversion).
// NOTE(review): mem-initializer lists below put _bytecode last although it is
// declared first; members are still initialized in declaration order, so this
// only triggers -Wreorder.
class LIR_OpConvert: public LIR_Op1 {
 friend class LIR_OpVisitState;

 private:
   Bytecodes::Code _bytecode;
   ConversionStub* _stub;
#ifdef PPC
  LIR_Opr _tmp1;
  LIR_Opr _tmp2;
#endif

 public:
   LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub)
     : LIR_Op1(lir_convert, opr, result)
     , _stub(stub)
#ifdef PPC
     , _tmp1(LIR_OprDesc::illegalOpr())
     , _tmp2(LIR_OprDesc::illegalOpr())
#endif
     , _bytecode(code)                           {}

#ifdef PPC
   LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub
                 ,LIR_Opr tmp1, LIR_Opr tmp2)
     : LIR_Op1(lir_convert, opr, result)
     , _stub(stub)
     , _tmp1(tmp1)
     , _tmp2(tmp2)
     , _bytecode(code)                           {}
#endif

  Bytecodes::Code bytecode() const               { return _bytecode; }
  ConversionStub* stub() const                   { return _stub; }
#ifdef PPC
  LIR_Opr tmp1() const                           { return _tmp1; }
  LIR_Opr tmp2() const                           { return _tmp2; }
#endif

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpConvert* as_OpConvert() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;

  static void print_bytecode(outputStream* out, Bytecodes::Code code) PRODUCT_RETURN;
};


// LIR_OpAllocObj
// Instance allocation: klass is the input operand, the new object is the result.
class LIR_OpAllocObj : public LIR_Op1 {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr _tmp1;
  LIR_Opr _tmp2;
  LIR_Opr _tmp3;
  LIR_Opr _tmp4;
  int     _hdr_size;
  int     _obj_size;
  CodeStub* _stub;       // slow-path allocation stub
  bool    _init_check;   // whether a class-initialization check is required

 public:
  LIR_OpAllocObj(LIR_Opr klass, LIR_Opr result,
                 LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4,
                 int hdr_size, int obj_size, bool init_check, CodeStub* stub)
    : LIR_Op1(lir_alloc_object, klass, result)
    , _tmp1(t1)
    , _tmp2(t2)
    , _tmp3(t3)
    , _tmp4(t4)
    , _hdr_size(hdr_size)
    , _obj_size(obj_size)
    , _init_check(init_check)
    , _stub(stub)                                { }

  LIR_Opr klass()        const                   { return in_opr();     }
  LIR_Opr obj()          const                   { return result_opr(); }
  LIR_Opr tmp1()         const                   { return _tmp1;        }
  LIR_Opr tmp2()         const                   { return _tmp2;        }
  LIR_Opr tmp3()         const                   { return _tmp3;        }
  LIR_Opr tmp4()         const                   { return _tmp4;        }
  int     header_size()  const                   { return _hdr_size;    }
  int     object_size()  const                   { return _obj_size;    }
  bool    init_check()   const                   { return _init_check;  }
  CodeStub* stub()       const                   { return _stub;        }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpAllocObj * as_OpAllocObj () { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};


// LIR_OpRoundFP
// Rounds an FPU-register value through a stack slot (strictfp support).
class LIR_OpRoundFP : public LIR_Op1 {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr _tmp;  // stack slot used as rounding scratch

 public:
  LIR_OpRoundFP(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result)
    : LIR_Op1(lir_roundfp, reg, result)
    , _tmp(stack_loc_temp) {}

  LIR_Opr tmp() const                            { return _tmp; }
  virtual LIR_OpRoundFP* as_OpRoundFP() { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
};

// LIR_OpTypeCheck
// instanceof / checkcast / store-check; several accessors assert which of
// those opcodes they are valid for.
class LIR_OpTypeCheck: public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr       _object;
  LIR_Opr       _array;
  ciKlass*      _klass;
  LIR_Opr       _tmp1;
  LIR_Opr       _tmp2;
  LIR_Opr       _tmp3;
  bool          _fast_check;
  CodeEmitInfo* _info_for_patch;
  CodeEmitInfo* _info_for_exception;
  CodeStub*     _stub;
  ciMethod*     _profiled_method;
  int           _profiled_bci;
  bool          _should_profile;

public:
  LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub);
  LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);

  LIR_Opr object() const                         { return _object;         }
  LIR_Opr array() const                          { assert(code() == lir_store_check, "not valid"); return _array;         }
  LIR_Opr tmp1() const                           { return _tmp1;           }
  LIR_Opr tmp2() const                           { return _tmp2;           }
  LIR_Opr tmp3() const                           { return _tmp3;           }
  ciKlass* klass() const                         { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _klass;          }
  bool fast_check() const                        { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _fast_check;     }
  CodeEmitInfo* info_for_patch() const           { return _info_for_patch;  }
  CodeEmitInfo* info_for_exception() const       { return _info_for_exception; }
  CodeStub* stub() const                         { return _stub;           }

  // MethodData* profiling
  void set_profiled_method(ciMethod *method)     { _profiled_method = method; }
  void set_profiled_bci(int bci)                 { _profiled_bci = bci;       }
  void set_should_profile(bool b)                { _should_profile = b;       }
  ciMethod* profiled_method() const              { return _profiled_method;   }
  int       profiled_bci() const                 { return _profiled_bci;      }
  bool      should_profile() const               { return _should_profile;    }

  virtual bool is_patching() { return _info_for_patch != NULL; }
  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
};

// LIR_Op2
// An instruction with two input operands.
// NOTE(review): the constructors below list initializers in an order that
// differs from declaration order (_fpu_stack_size is declared first);
// initialization still follows declaration order -- only -Wreorder fires.
class LIR_Op2: public LIR_Op {
 friend class LIR_OpVisitState;

  int  _fpu_stack_size; // for sin/cos implementation on Intel

 protected:
  LIR_Opr   _opr1;
  LIR_Opr   _opr2;
  BasicType _type;
  LIR_Opr   _tmp1;
  LIR_Opr   _tmp2;
  LIR_Opr   _tmp3;
  LIR_Opr   _tmp4;
  LIR_Opr   _tmp5;
  LIR_Condition _condition;

  void verify() const;

 public:
  // compare / assert form: condition required, no result.
  LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, CodeEmitInfo* info = NULL)
    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(T_ILLEGAL)
    , _condition(condition)
    , _fpu_stack_size(0)
    , _tmp1(LIR_OprFact::illegalOpr)
    , _tmp2(LIR_OprFact::illegalOpr)
    , _tmp3(LIR_OprFact::illegalOpr)
    , _tmp4(LIR_OprFact::illegalOpr)
    , _tmp5(LIR_OprFact::illegalOpr) {
    assert(code == lir_cmp || code == lir_assert, "code check");
  }

  // cmove form: condition and a typed result.
  LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type)
    : LIR_Op(code, result, NULL)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(type)
    , _condition(condition)
    , _fpu_stack_size(0)
    , _tmp1(LIR_OprFact::illegalOpr)
    , _tmp2(LIR_OprFact::illegalOpr)
    , _tmp3(LIR_OprFact::illegalOpr)
    , _tmp4(LIR_OprFact::illegalOpr)
    , _tmp5(LIR_OprFact::illegalOpr) {
    assert(code == lir_cmove, "code check");
    assert(type != T_ILLEGAL, "cmove should have type");
  }

  // general arithmetic/logic form: no condition.
  LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result = LIR_OprFact::illegalOpr,
          CodeEmitInfo* info = NULL, BasicType type = T_ILLEGAL)
    : LIR_Op(code, result, info)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(type)
    , _condition(lir_cond_unknown)
    , _fpu_stack_size(0)
    , _tmp1(LIR_OprFact::illegalOpr)
    , _tmp2(LIR_OprFact::illegalOpr)
    , _tmp3(LIR_OprFact::illegalOpr)
    , _tmp4(LIR_OprFact::illegalOpr)
    , _tmp5(LIR_OprFact::illegalOpr) {
    assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
  }

  // form with explicit temp registers.
  LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2 = LIR_OprFact::illegalOpr,
          LIR_Opr tmp3 = LIR_OprFact::illegalOpr, LIR_Opr tmp4 = LIR_OprFact::illegalOpr, LIR_Opr tmp5 = LIR_OprFact::illegalOpr)
    : LIR_Op(code, result, NULL)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(T_ILLEGAL)
    , _condition(lir_cond_unknown)
    , _fpu_stack_size(0)
    , _tmp1(tmp1)
    , _tmp2(tmp2)
    , _tmp3(tmp3)
    , _tmp4(tmp4)
    , _tmp5(tmp5) {
    assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
  }

  LIR_Opr in_opr1() const                        { return _opr1; }
  LIR_Opr in_opr2() const                        { return _opr2; }
  BasicType type()  const                        { return _type; }
  LIR_Opr tmp1_opr() const                       { return _tmp1; }
  LIR_Opr tmp2_opr() const                       { return _tmp2; }
  LIR_Opr tmp3_opr() const                       { return _tmp3; }
  LIR_Opr
tmp4_opr() const { return _tmp4; } 1.1723 + LIR_Opr tmp5_opr() const { return _tmp5; } 1.1724 + LIR_Condition condition() const { 1.1725 + assert(code() == lir_cmp || code() == lir_cmove || code() == lir_assert, "only valid for cmp and cmove and assert"); return _condition; 1.1726 + } 1.1727 + void set_condition(LIR_Condition condition) { 1.1728 + assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); _condition = condition; 1.1729 + } 1.1730 + 1.1731 + void set_fpu_stack_size(int size) { _fpu_stack_size = size; } 1.1732 + int fpu_stack_size() const { return _fpu_stack_size; } 1.1733 + 1.1734 + void set_in_opr1(LIR_Opr opr) { _opr1 = opr; } 1.1735 + void set_in_opr2(LIR_Opr opr) { _opr2 = opr; } 1.1736 + 1.1737 + virtual void emit_code(LIR_Assembler* masm); 1.1738 + virtual LIR_Op2* as_Op2() { return this; } 1.1739 + virtual void print_instr(outputStream* out) const PRODUCT_RETURN; 1.1740 +}; 1.1741 + 1.1742 +class LIR_OpAllocArray : public LIR_Op { 1.1743 + friend class LIR_OpVisitState; 1.1744 + 1.1745 + private: 1.1746 + LIR_Opr _klass; 1.1747 + LIR_Opr _len; 1.1748 + LIR_Opr _tmp1; 1.1749 + LIR_Opr _tmp2; 1.1750 + LIR_Opr _tmp3; 1.1751 + LIR_Opr _tmp4; 1.1752 + BasicType _type; 1.1753 + CodeStub* _stub; 1.1754 + 1.1755 + public: 1.1756 + LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub) 1.1757 + : LIR_Op(lir_alloc_array, result, NULL) 1.1758 + , _klass(klass) 1.1759 + , _len(len) 1.1760 + , _tmp1(t1) 1.1761 + , _tmp2(t2) 1.1762 + , _tmp3(t3) 1.1763 + , _tmp4(t4) 1.1764 + , _type(type) 1.1765 + , _stub(stub) {} 1.1766 + 1.1767 + LIR_Opr klass() const { return _klass; } 1.1768 + LIR_Opr len() const { return _len; } 1.1769 + LIR_Opr obj() const { return result_opr(); } 1.1770 + LIR_Opr tmp1() const { return _tmp1; } 1.1771 + LIR_Opr tmp2() const { return _tmp2; } 1.1772 + LIR_Opr tmp3() const { return _tmp3; } 1.1773 + LIR_Opr tmp4() const { return 
_tmp4; } 1.1774 + BasicType type() const { return _type; } 1.1775 + CodeStub* stub() const { return _stub; } 1.1776 + 1.1777 + virtual void emit_code(LIR_Assembler* masm); 1.1778 + virtual LIR_OpAllocArray * as_OpAllocArray () { return this; } 1.1779 + virtual void print_instr(outputStream* out) const PRODUCT_RETURN; 1.1780 +}; 1.1781 + 1.1782 + 1.1783 +class LIR_Op3: public LIR_Op { 1.1784 + friend class LIR_OpVisitState; 1.1785 + 1.1786 + private: 1.1787 + LIR_Opr _opr1; 1.1788 + LIR_Opr _opr2; 1.1789 + LIR_Opr _opr3; 1.1790 + public: 1.1791 + LIR_Op3(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr result, CodeEmitInfo* info = NULL) 1.1792 + : LIR_Op(code, result, info) 1.1793 + , _opr1(opr1) 1.1794 + , _opr2(opr2) 1.1795 + , _opr3(opr3) { assert(is_in_range(code, begin_op3, end_op3), "code check"); } 1.1796 + LIR_Opr in_opr1() const { return _opr1; } 1.1797 + LIR_Opr in_opr2() const { return _opr2; } 1.1798 + LIR_Opr in_opr3() const { return _opr3; } 1.1799 + 1.1800 + virtual void emit_code(LIR_Assembler* masm); 1.1801 + virtual LIR_Op3* as_Op3() { return this; } 1.1802 + virtual void print_instr(outputStream* out) const PRODUCT_RETURN; 1.1803 +}; 1.1804 + 1.1805 + 1.1806 +//-------------------------------- 1.1807 +class LabelObj: public CompilationResourceObj { 1.1808 + private: 1.1809 + Label _label; 1.1810 + public: 1.1811 + LabelObj() {} 1.1812 + Label* label() { return &_label; } 1.1813 +}; 1.1814 + 1.1815 + 1.1816 +class LIR_OpLock: public LIR_Op { 1.1817 + friend class LIR_OpVisitState; 1.1818 + 1.1819 + private: 1.1820 + LIR_Opr _hdr; 1.1821 + LIR_Opr _obj; 1.1822 + LIR_Opr _lock; 1.1823 + LIR_Opr _scratch; 1.1824 + CodeStub* _stub; 1.1825 + public: 1.1826 + LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info) 1.1827 + : LIR_Op(code, LIR_OprFact::illegalOpr, info) 1.1828 + , _hdr(hdr) 1.1829 + , _obj(obj) 1.1830 + , _lock(lock) 1.1831 + , _scratch(scratch) 1.1832 + , 
_stub(stub) {} 1.1833 + 1.1834 + LIR_Opr hdr_opr() const { return _hdr; } 1.1835 + LIR_Opr obj_opr() const { return _obj; } 1.1836 + LIR_Opr lock_opr() const { return _lock; } 1.1837 + LIR_Opr scratch_opr() const { return _scratch; } 1.1838 + CodeStub* stub() const { return _stub; } 1.1839 + 1.1840 + virtual void emit_code(LIR_Assembler* masm); 1.1841 + virtual LIR_OpLock* as_OpLock() { return this; } 1.1842 + void print_instr(outputStream* out) const PRODUCT_RETURN; 1.1843 +}; 1.1844 + 1.1845 + 1.1846 +class LIR_OpDelay: public LIR_Op { 1.1847 + friend class LIR_OpVisitState; 1.1848 + 1.1849 + private: 1.1850 + LIR_Op* _op; 1.1851 + 1.1852 + public: 1.1853 + LIR_OpDelay(LIR_Op* op, CodeEmitInfo* info): 1.1854 + LIR_Op(lir_delay_slot, LIR_OprFact::illegalOpr, info), 1.1855 + _op(op) { 1.1856 + assert(op->code() == lir_nop || LIRFillDelaySlots, "should be filling with nops"); 1.1857 + } 1.1858 + virtual void emit_code(LIR_Assembler* masm); 1.1859 + virtual LIR_OpDelay* as_OpDelay() { return this; } 1.1860 + void print_instr(outputStream* out) const PRODUCT_RETURN; 1.1861 + LIR_Op* delay_op() const { return _op; } 1.1862 + CodeEmitInfo* call_info() const { return info(); } 1.1863 +}; 1.1864 + 1.1865 +#ifdef ASSERT 1.1866 +// LIR_OpAssert 1.1867 +class LIR_OpAssert : public LIR_Op2 { 1.1868 + friend class LIR_OpVisitState; 1.1869 + 1.1870 + private: 1.1871 + const char* _msg; 1.1872 + bool _halt; 1.1873 + 1.1874 + public: 1.1875 + LIR_OpAssert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt) 1.1876 + : LIR_Op2(lir_assert, condition, opr1, opr2) 1.1877 + , _halt(halt) 1.1878 + , _msg(msg) { 1.1879 + } 1.1880 + 1.1881 + const char* msg() const { return _msg; } 1.1882 + bool halt() const { return _halt; } 1.1883 + 1.1884 + virtual void emit_code(LIR_Assembler* masm); 1.1885 + virtual LIR_OpAssert* as_OpAssert() { return this; } 1.1886 + virtual void print_instr(outputStream* out) const PRODUCT_RETURN; 1.1887 +}; 1.1888 +#endif 1.1889 + 
1.1890 +// LIR_OpCompareAndSwap 1.1891 +class LIR_OpCompareAndSwap : public LIR_Op { 1.1892 + friend class LIR_OpVisitState; 1.1893 + 1.1894 + private: 1.1895 + LIR_Opr _addr; 1.1896 + LIR_Opr _cmp_value; 1.1897 + LIR_Opr _new_value; 1.1898 + LIR_Opr _tmp1; 1.1899 + LIR_Opr _tmp2; 1.1900 + 1.1901 + public: 1.1902 + LIR_OpCompareAndSwap(LIR_Code code, LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, 1.1903 + LIR_Opr t1, LIR_Opr t2, LIR_Opr result) 1.1904 + : LIR_Op(code, result, NULL) // no result, no info 1.1905 + , _addr(addr) 1.1906 + , _cmp_value(cmp_value) 1.1907 + , _new_value(new_value) 1.1908 + , _tmp1(t1) 1.1909 + , _tmp2(t2) { } 1.1910 + 1.1911 + LIR_Opr addr() const { return _addr; } 1.1912 + LIR_Opr cmp_value() const { return _cmp_value; } 1.1913 + LIR_Opr new_value() const { return _new_value; } 1.1914 + LIR_Opr tmp1() const { return _tmp1; } 1.1915 + LIR_Opr tmp2() const { return _tmp2; } 1.1916 + 1.1917 + virtual void emit_code(LIR_Assembler* masm); 1.1918 + virtual LIR_OpCompareAndSwap * as_OpCompareAndSwap () { return this; } 1.1919 + virtual void print_instr(outputStream* out) const PRODUCT_RETURN; 1.1920 +}; 1.1921 + 1.1922 +// LIR_OpProfileCall 1.1923 +class LIR_OpProfileCall : public LIR_Op { 1.1924 + friend class LIR_OpVisitState; 1.1925 + 1.1926 + private: 1.1927 + ciMethod* _profiled_method; 1.1928 + int _profiled_bci; 1.1929 + ciMethod* _profiled_callee; 1.1930 + LIR_Opr _mdo; 1.1931 + LIR_Opr _recv; 1.1932 + LIR_Opr _tmp1; 1.1933 + ciKlass* _known_holder; 1.1934 + 1.1935 + public: 1.1936 + // Destroys recv 1.1937 + LIR_OpProfileCall(ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder) 1.1938 + : LIR_Op(lir_profile_call, LIR_OprFact::illegalOpr, NULL) // no result, no info 1.1939 + , _profiled_method(profiled_method) 1.1940 + , _profiled_bci(profiled_bci) 1.1941 + , _profiled_callee(profiled_callee) 1.1942 + , _mdo(mdo) 1.1943 + , _recv(recv) 1.1944 + , 
_tmp1(t1) 1.1945 + , _known_holder(known_holder) { } 1.1946 + 1.1947 + ciMethod* profiled_method() const { return _profiled_method; } 1.1948 + int profiled_bci() const { return _profiled_bci; } 1.1949 + ciMethod* profiled_callee() const { return _profiled_callee; } 1.1950 + LIR_Opr mdo() const { return _mdo; } 1.1951 + LIR_Opr recv() const { return _recv; } 1.1952 + LIR_Opr tmp1() const { return _tmp1; } 1.1953 + ciKlass* known_holder() const { return _known_holder; } 1.1954 + 1.1955 + virtual void emit_code(LIR_Assembler* masm); 1.1956 + virtual LIR_OpProfileCall* as_OpProfileCall() { return this; } 1.1957 + virtual void print_instr(outputStream* out) const PRODUCT_RETURN; 1.1958 +}; 1.1959 + 1.1960 +// LIR_OpProfileType 1.1961 +class LIR_OpProfileType : public LIR_Op { 1.1962 + friend class LIR_OpVisitState; 1.1963 + 1.1964 + private: 1.1965 + LIR_Opr _mdp; 1.1966 + LIR_Opr _obj; 1.1967 + LIR_Opr _tmp; 1.1968 + ciKlass* _exact_klass; // non NULL if we know the klass statically (no need to load it from _obj) 1.1969 + intptr_t _current_klass; // what the profiling currently reports 1.1970 + bool _not_null; // true if we know statically that _obj cannot be null 1.1971 + bool _no_conflict; // true if we're profling parameters, _exact_klass is not NULL and we know 1.1972 + // _exact_klass it the only possible type for this parameter in any context. 
1.1973 + 1.1974 + public: 1.1975 + // Destroys recv 1.1976 + LIR_OpProfileType(LIR_Opr mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) 1.1977 + : LIR_Op(lir_profile_type, LIR_OprFact::illegalOpr, NULL) // no result, no info 1.1978 + , _mdp(mdp) 1.1979 + , _obj(obj) 1.1980 + , _exact_klass(exact_klass) 1.1981 + , _current_klass(current_klass) 1.1982 + , _tmp(tmp) 1.1983 + , _not_null(not_null) 1.1984 + , _no_conflict(no_conflict) { } 1.1985 + 1.1986 + LIR_Opr mdp() const { return _mdp; } 1.1987 + LIR_Opr obj() const { return _obj; } 1.1988 + LIR_Opr tmp() const { return _tmp; } 1.1989 + ciKlass* exact_klass() const { return _exact_klass; } 1.1990 + intptr_t current_klass() const { return _current_klass; } 1.1991 + bool not_null() const { return _not_null; } 1.1992 + bool no_conflict() const { return _no_conflict; } 1.1993 + 1.1994 + virtual void emit_code(LIR_Assembler* masm); 1.1995 + virtual LIR_OpProfileType* as_OpProfileType() { return this; } 1.1996 + virtual void print_instr(outputStream* out) const PRODUCT_RETURN; 1.1997 +}; 1.1998 + 1.1999 +class LIR_InsertionBuffer; 1.2000 + 1.2001 +//--------------------------------LIR_List--------------------------------------------------- 1.2002 +// Maintains a list of LIR instructions (one instance of LIR_List per basic block) 1.2003 +// The LIR instructions are appended by the LIR_List class itself; 1.2004 +// 1.2005 +// Notes: 1.2006 +// - all offsets are(should be) in bytes 1.2007 +// - local positions are specified with an offset, with offset 0 being local 0 1.2008 + 1.2009 +class LIR_List: public CompilationResourceObj { 1.2010 + private: 1.2011 + LIR_OpList _operations; 1.2012 + 1.2013 + Compilation* _compilation; 1.2014 +#ifndef PRODUCT 1.2015 + BlockBegin* _block; 1.2016 +#endif 1.2017 +#ifdef ASSERT 1.2018 + const char * _file; 1.2019 + int _line; 1.2020 +#endif 1.2021 + 1.2022 + void append(LIR_Op* op) { 1.2023 + if (op->source() == NULL) 1.2024 + 
op->set_source(_compilation->current_instruction()); 1.2025 +#ifndef PRODUCT 1.2026 + if (PrintIRWithLIR) { 1.2027 + _compilation->maybe_print_current_instruction(); 1.2028 + op->print(); tty->cr(); 1.2029 + } 1.2030 +#endif // PRODUCT 1.2031 + 1.2032 + _operations.append(op); 1.2033 + 1.2034 +#ifdef ASSERT 1.2035 + op->verify(); 1.2036 + op->set_file_and_line(_file, _line); 1.2037 + _file = NULL; 1.2038 + _line = 0; 1.2039 +#endif 1.2040 + } 1.2041 + 1.2042 + public: 1.2043 + LIR_List(Compilation* compilation, BlockBegin* block = NULL); 1.2044 + 1.2045 +#ifdef ASSERT 1.2046 + void set_file_and_line(const char * file, int line); 1.2047 +#endif 1.2048 + 1.2049 + //---------- accessors --------------- 1.2050 + LIR_OpList* instructions_list() { return &_operations; } 1.2051 + int length() const { return _operations.length(); } 1.2052 + LIR_Op* at(int i) const { return _operations.at(i); } 1.2053 + 1.2054 + NOT_PRODUCT(BlockBegin* block() const { return _block; }); 1.2055 + 1.2056 + // insert LIR_Ops in buffer to right places in LIR_List 1.2057 + void append(LIR_InsertionBuffer* buffer); 1.2058 + 1.2059 + //---------- mutators --------------- 1.2060 + void insert_before(int i, LIR_List* op_list) { _operations.insert_before(i, op_list->instructions_list()); } 1.2061 + void insert_before(int i, LIR_Op* op) { _operations.insert_before(i, op); } 1.2062 + void remove_at(int i) { _operations.remove_at(i); } 1.2063 + 1.2064 + //---------- printing ------------- 1.2065 + void print_instructions() PRODUCT_RETURN; 1.2066 + 1.2067 + 1.2068 + //---------- instructions ------------- 1.2069 + void call_opt_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result, 1.2070 + address dest, LIR_OprList* arguments, 1.2071 + CodeEmitInfo* info) { 1.2072 + append(new LIR_OpJavaCall(lir_optvirtual_call, method, receiver, result, dest, arguments, info)); 1.2073 + } 1.2074 + void call_static(ciMethod* method, LIR_Opr result, 1.2075 + address dest, LIR_OprList* arguments, CodeEmitInfo* info) 
{ 1.2076 + append(new LIR_OpJavaCall(lir_static_call, method, LIR_OprFact::illegalOpr, result, dest, arguments, info)); 1.2077 + } 1.2078 + void call_icvirtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result, 1.2079 + address dest, LIR_OprList* arguments, CodeEmitInfo* info) { 1.2080 + append(new LIR_OpJavaCall(lir_icvirtual_call, method, receiver, result, dest, arguments, info)); 1.2081 + } 1.2082 + void call_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result, 1.2083 + intptr_t vtable_offset, LIR_OprList* arguments, CodeEmitInfo* info) { 1.2084 + append(new LIR_OpJavaCall(lir_virtual_call, method, receiver, result, vtable_offset, arguments, info)); 1.2085 + } 1.2086 + void call_dynamic(ciMethod* method, LIR_Opr receiver, LIR_Opr result, 1.2087 + address dest, LIR_OprList* arguments, CodeEmitInfo* info) { 1.2088 + append(new LIR_OpJavaCall(lir_dynamic_call, method, receiver, result, dest, arguments, info)); 1.2089 + } 1.2090 + 1.2091 + void get_thread(LIR_Opr result) { append(new LIR_Op0(lir_get_thread, result)); } 1.2092 + void word_align() { append(new LIR_Op0(lir_word_align)); } 1.2093 + void membar() { append(new LIR_Op0(lir_membar)); } 1.2094 + void membar_acquire() { append(new LIR_Op0(lir_membar_acquire)); } 1.2095 + void membar_release() { append(new LIR_Op0(lir_membar_release)); } 1.2096 + void membar_loadload() { append(new LIR_Op0(lir_membar_loadload)); } 1.2097 + void membar_storestore() { append(new LIR_Op0(lir_membar_storestore)); } 1.2098 + void membar_loadstore() { append(new LIR_Op0(lir_membar_loadstore)); } 1.2099 + void membar_storeload() { append(new LIR_Op0(lir_membar_storeload)); } 1.2100 + 1.2101 + void nop() { append(new LIR_Op0(lir_nop)); } 1.2102 + void build_frame() { append(new LIR_Op0(lir_build_frame)); } 1.2103 + 1.2104 + void std_entry(LIR_Opr receiver) { append(new LIR_Op0(lir_std_entry, receiver)); } 1.2105 + void osr_entry(LIR_Opr osrPointer) { append(new LIR_Op0(lir_osr_entry, osrPointer)); } 1.2106 + 1.2107 + void 
branch_destination(Label* lbl) { append(new LIR_OpLabel(lbl)); } 1.2108 + 1.2109 + void negate(LIR_Opr from, LIR_Opr to) { append(new LIR_Op1(lir_neg, from, to)); } 1.2110 + void leal(LIR_Opr from, LIR_Opr result_reg) { append(new LIR_Op1(lir_leal, from, result_reg)); } 1.2111 + 1.2112 + // result is a stack location for old backend and vreg for UseLinearScan 1.2113 + // stack_loc_temp is an illegal register for old backend 1.2114 + void roundfp(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result) { append(new LIR_OpRoundFP(reg, stack_loc_temp, result)); } 1.2115 + void unaligned_move(LIR_Address* src, LIR_Opr dst) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); } 1.2116 + void unaligned_move(LIR_Opr src, LIR_Address* dst) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), src->type(), lir_patch_none, NULL, lir_move_unaligned)); } 1.2117 + void unaligned_move(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); } 1.2118 + void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); } 1.2119 + void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); } 1.2120 + void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); } 1.2121 + void move_wide(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { 1.2122 + if (UseCompressedOops) { 1.2123 + append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info, lir_move_wide)); 1.2124 + } else { 1.2125 + move(src, dst, info); 1.2126 + } 1.2127 + } 1.2128 + void move_wide(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { 1.2129 + if 
(UseCompressedOops) { 1.2130 + append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info, lir_move_wide)); 1.2131 + } else { 1.2132 + move(src, dst, info); 1.2133 + } 1.2134 + } 1.2135 + void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); } 1.2136 + 1.2137 + void oop2reg (jobject o, LIR_Opr reg) { assert(reg->type() == T_OBJECT, "bad reg"); append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o), reg)); } 1.2138 + void oop2reg_patch(jobject o, LIR_Opr reg, CodeEmitInfo* info); 1.2139 + 1.2140 + void metadata2reg (Metadata* o, LIR_Opr reg) { assert(reg->type() == T_METADATA, "bad reg"); append(new LIR_Op1(lir_move, LIR_OprFact::metadataConst(o), reg)); } 1.2141 + void klass2reg_patch(Metadata* o, LIR_Opr reg, CodeEmitInfo* info); 1.2142 + 1.2143 + void return_op(LIR_Opr result) { append(new LIR_Op1(lir_return, result)); } 1.2144 + 1.2145 + void safepoint(LIR_Opr tmp, CodeEmitInfo* info) { append(new LIR_Op1(lir_safepoint, tmp, info)); } 1.2146 + 1.2147 +#ifdef PPC 1.2148 + void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_OpConvert(code, left, dst, NULL, tmp1, tmp2)); } 1.2149 +#endif 1.2150 + void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = NULL/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); } 1.2151 + 1.2152 + void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and, left, right, dst)); } 1.2153 + void logical_or (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or, left, right, dst)); } 1.2154 + void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor, left, right, dst)); } 1.2155 + 1.2156 + void pack64(LIR_Opr src, LIR_Opr dst) { append(new 
LIR_Op1(lir_pack64, src, dst, T_LONG, lir_patch_none, NULL)); } 1.2157 + void unpack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_unpack64, src, dst, T_LONG, lir_patch_none, NULL)); } 1.2158 + 1.2159 + void null_check(LIR_Opr opr, CodeEmitInfo* info) { append(new LIR_Op1(lir_null_check, opr, info)); } 1.2160 + void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { 1.2161 + append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info)); 1.2162 + } 1.2163 + void unwind_exception(LIR_Opr exceptionOop) { 1.2164 + append(new LIR_Op1(lir_unwind, exceptionOop)); 1.2165 + } 1.2166 + 1.2167 + void compare_to (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { 1.2168 + append(new LIR_Op2(lir_compare_to, left, right, dst)); 1.2169 + } 1.2170 + 1.2171 + void push(LIR_Opr opr) { append(new LIR_Op1(lir_push, opr)); } 1.2172 + void pop(LIR_Opr reg) { append(new LIR_Op1(lir_pop, reg)); } 1.2173 + 1.2174 + void cmp(LIR_Condition condition, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL) { 1.2175 + append(new LIR_Op2(lir_cmp, condition, left, right, info)); 1.2176 + } 1.2177 + void cmp(LIR_Condition condition, LIR_Opr left, int right, CodeEmitInfo* info = NULL) { 1.2178 + cmp(condition, left, LIR_OprFact::intConst(right), info); 1.2179 + } 1.2180 + 1.2181 + void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info); 1.2182 + void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info); 1.2183 + 1.2184 + void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) { 1.2185 + append(new LIR_Op2(lir_cmove, condition, src1, src2, dst, type)); 1.2186 + } 1.2187 + 1.2188 + void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, 1.2189 + LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr); 1.2190 + void cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, 1.2191 + LIR_Opr t1, LIR_Opr t2, LIR_Opr 
result = LIR_OprFact::illegalOpr); 1.2192 + void cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, 1.2193 + LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr); 1.2194 + 1.2195 + void abs (LIR_Opr from, LIR_Opr to, LIR_Opr tmp) { append(new LIR_Op2(lir_abs , from, tmp, to)); } 1.2196 + void sqrt(LIR_Opr from, LIR_Opr to, LIR_Opr tmp) { append(new LIR_Op2(lir_sqrt, from, tmp, to)); } 1.2197 + void log (LIR_Opr from, LIR_Opr to, LIR_Opr tmp) { append(new LIR_Op2(lir_log, from, LIR_OprFact::illegalOpr, to, tmp)); } 1.2198 + void log10 (LIR_Opr from, LIR_Opr to, LIR_Opr tmp) { append(new LIR_Op2(lir_log10, from, LIR_OprFact::illegalOpr, to, tmp)); } 1.2199 + void sin (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_sin , from, tmp1, to, tmp2)); } 1.2200 + void cos (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_cos , from, tmp1, to, tmp2)); } 1.2201 + void tan (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_tan , from, tmp1, to, tmp2)); } 1.2202 + void exp (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, LIR_Opr tmp4, LIR_Opr tmp5) { append(new LIR_Op2(lir_exp , from, tmp1, to, tmp2, tmp3, tmp4, tmp5)); } 1.2203 + void pow (LIR_Opr arg1, LIR_Opr arg2, LIR_Opr res, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, LIR_Opr tmp4, LIR_Opr tmp5) { append(new LIR_Op2(lir_pow, arg1, arg2, res, tmp1, tmp2, tmp3, tmp4, tmp5)); } 1.2204 + 1.2205 + void add (LIR_Opr left, LIR_Opr right, LIR_Opr res) { append(new LIR_Op2(lir_add, left, right, res)); } 1.2206 + void sub (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_sub, left, right, res, info)); } 1.2207 + void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res) { append(new LIR_Op2(lir_mul, left, right, res)); } 1.2208 + void mul_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_mul_strictfp, left, right, res, tmp)); } 
1.2209 + void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_div, left, right, res, info)); } 1.2210 + void div_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_div_strictfp, left, right, res, tmp)); } 1.2211 + void rem (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_rem, left, right, res, info)); } 1.2212 + 1.2213 + void volatile_load_mem_reg(LIR_Address* address, LIR_Opr dst, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none); 1.2214 + void volatile_load_unsafe_reg(LIR_Opr base, LIR_Opr offset, LIR_Opr dst, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code); 1.2215 + 1.2216 + void load(LIR_Address* addr, LIR_Opr src, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none); 1.2217 + 1.2218 + void prefetch(LIR_Address* addr, bool is_store); 1.2219 + 1.2220 + void store_mem_int(jint v, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none); 1.2221 + void store_mem_oop(jobject o, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none); 1.2222 + void store(LIR_Opr src, LIR_Address* addr, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none); 1.2223 + void volatile_store_mem_reg(LIR_Opr src, LIR_Address* address, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none); 1.2224 + void volatile_store_unsafe_reg(LIR_Opr src, LIR_Opr base, LIR_Opr offset, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code); 1.2225 + 1.2226 + void idiv(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info); 1.2227 + void idiv(LIR_Opr left, int right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info); 1.2228 + void irem(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info); 1.2229 + void irem(LIR_Opr left, int right, LIR_Opr res, 
LIR_Opr tmp, CodeEmitInfo* info); 1.2230 + 1.2231 + void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub); 1.2232 + void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub); 1.2233 + 1.2234 + // jump is an unconditional branch 1.2235 + void jump(BlockBegin* block) { 1.2236 + append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, block)); 1.2237 + } 1.2238 + void jump(CodeStub* stub) { 1.2239 + append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, stub)); 1.2240 + } 1.2241 + void branch(LIR_Condition cond, BasicType type, Label* lbl) { append(new LIR_OpBranch(cond, type, lbl)); } 1.2242 + void branch(LIR_Condition cond, BasicType type, BlockBegin* block) { 1.2243 + assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons"); 1.2244 + append(new LIR_OpBranch(cond, type, block)); 1.2245 + } 1.2246 + void branch(LIR_Condition cond, BasicType type, CodeStub* stub) { 1.2247 + assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons"); 1.2248 + append(new LIR_OpBranch(cond, type, stub)); 1.2249 + } 1.2250 + void branch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* unordered) { 1.2251 + assert(type == T_FLOAT || type == T_DOUBLE, "fp comparisons only"); 1.2252 + append(new LIR_OpBranch(cond, type, block, unordered)); 1.2253 + } 1.2254 + 1.2255 + void shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp); 1.2256 + void shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp); 1.2257 + void unsigned_shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp); 1.2258 + 1.2259 + void shift_left(LIR_Opr value, int count, LIR_Opr dst) { shift_left(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); } 1.2260 + void shift_right(LIR_Opr value, int count, LIR_Opr dst) { shift_right(value, LIR_OprFact::intConst(count), 
// NOTE(review): the lines below are an hg-diff rendering of this header — each logical
// source line carries a fused "1.NNNN +" marker.  The C++ tokens are left byte-identical
// here; only this comment block is added.
// Region contents:
//  - tail of class LIR_List (class opens earlier in the file): shorthand emitter methods
//    that wrap append(new LIR_OpN(...)) — shifts, lcmp2int/fcmp2int, call_runtime and
//    call_runtime_leaf (LIR_OpRTCall), monitor lock/unlock, x87 FPU-mode ops, arraycopy,
//    update_crc32, instanceof/checkcast/store_check type checks, MethodData* profiling
//    (profile_call/profile_type), atomic xadd/xchg, and (under ASSERT) lir_assert.
//  - print_LIR(BlockList*): declaration only; presumably debug-prints all blocks' LIR —
//    TODO confirm against the .cpp.
//  - class LIR_InsertionBuffer: collects LIR_Ops to be spliced into a LIR_List later.
//    Insertion points live in the flat intStack _index_and_count, two slots per point
//    (index into the lir list, then the op count), as documented inline below; init()
//    must be called before use and finish() detaches the buffer from its list.
dst, LIR_OprFact::illegalOpr); } 1.2261 + void unsigned_shift_right(LIR_Opr value, int count, LIR_Opr dst) { unsigned_shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); } 1.2262 + 1.2263 + void lcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_cmp_l2i, left, right, dst)); } 1.2264 + void fcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst, bool is_unordered_less); 1.2265 + 1.2266 + void call_runtime_leaf(address routine, LIR_Opr tmp, LIR_Opr result, LIR_OprList* arguments) { 1.2267 + append(new LIR_OpRTCall(routine, tmp, result, arguments)); 1.2268 + } 1.2269 + 1.2270 + void call_runtime(address routine, LIR_Opr tmp, LIR_Opr result, 1.2271 + LIR_OprList* arguments, CodeEmitInfo* info) { 1.2272 + append(new LIR_OpRTCall(routine, tmp, result, arguments, info)); 1.2273 + } 1.2274 + 1.2275 + void load_stack_address_monitor(int monitor_ix, LIR_Opr dst) { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); } 1.2276 + void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub); 1.2277 + void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info); 1.2278 + 1.2279 + void set_24bit_fpu() { append(new LIR_Op0(lir_24bit_FPU )); } 1.2280 + void restore_fpu() { append(new LIR_Op0(lir_reset_FPU )); } 1.2281 + void breakpoint() { append(new LIR_Op0(lir_breakpoint)); } 1.2282 + 1.2283 + void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); } 1.2284 + 1.2285 + void update_crc32(LIR_Opr crc, LIR_Opr val, LIR_Opr res) { append(new LIR_OpUpdateCRC32(crc, val, res)); } 1.2286 + 1.2287 + void fpop_raw() { append(new LIR_Op0(lir_fpop_raw)); } 1.2288 + 1.2289 + void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass,
LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci); 1.2290 + void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, ciMethod* profiled_method, int profiled_bci); 1.2291 + 1.2292 + void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass, 1.2293 + LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, 1.2294 + CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub, 1.2295 + ciMethod* profiled_method, int profiled_bci); 1.2296 + // MethodData* profiling 1.2297 + void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) { 1.2298 + append(new LIR_OpProfileCall(method, bci, callee, mdo, recv, t1, cha_klass)); 1.2299 + } 1.2300 + void profile_type(LIR_Address* mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) { 1.2301 + append(new LIR_OpProfileType(LIR_OprFact::address(mdp), obj, exact_klass, current_klass, tmp, not_null, no_conflict)); 1.2302 + } 1.2303 + 1.2304 + void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); } 1.2305 + void xchg(LIR_Opr src, LIR_Opr set, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xchg, src, set, res, tmp)); } 1.2306 +#ifdef ASSERT 1.2307 + void lir_assert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt) { append(new LIR_OpAssert(condition, opr1, opr2, msg, halt)); } 1.2308 +#endif 1.2309 +}; 1.2310 + 1.2311 +void print_LIR(BlockList* blocks); 1.2312 + 1.2313 +class LIR_InsertionBuffer : public CompilationResourceObj { 1.2314 + private: 1.2315 + LIR_List* _lir; // the lir list where ops of this buffer should be inserted later (NULL when uninitialized) 1.2316 + 1.2317 + // list of insertion points.
index and count are stored alternately: 1.2318 + // _index_and_count[i * 2]: the index into lir list where "count" ops should be inserted 1.2319 + // _index_and_count[i * 2 + 1]: the number of ops to be inserted at index 1.2320 + intStack _index_and_count; 1.2321 + 1.2322 + // the LIR_Ops to be inserted 1.2323 + LIR_OpList _ops; 1.2324 + 1.2325 + void append_new(int index, int count) { _index_and_count.append(index); _index_and_count.append(count); } 1.2326 + void set_index_at(int i, int value) { _index_and_count.at_put((i << 1), value); } 1.2327 + void set_count_at(int i, int value) { _index_and_count.at_put((i << 1) + 1, value); } 1.2328 + 1.2329 +#ifdef ASSERT 1.2330 + void verify(); 1.2331 +#endif 1.2332 + public: 1.2333 + LIR_InsertionBuffer() : _lir(NULL), _index_and_count(8), _ops(8) { } 1.2334 + 1.2335 + // must be called before using the insertion buffer 1.2336 + void init(LIR_List* lir) { assert(!initialized(), "already initialized"); _lir = lir; _index_and_count.clear(); _ops.clear(); } 1.2337 + bool initialized() const { return _lir != NULL; } 1.2338 + // called automatically when the buffer is appended to the LIR_List 1.2339 + void finish() { _lir = NULL; } 1.2340 + 1.2341 + // accessors 1.2342 + LIR_List* lir_list() const { return _lir; } 1.2343 + int number_of_insertion_points() const { return _index_and_count.length() >> 1; } 1.2344 + int index_at(int i) const { return _index_and_count.at((i << 1)); } 1.2345 + int count_at(int i) const { return _index_and_count.at((i << 1) + 1); } 1.2346 + 1.2347 + int number_of_ops() const { return _ops.length(); } 1.2348 + LIR_Op* op_at(int i) const { return _ops.at(i); } 1.2349 + 1.2350 + // append an instruction to the buffer 1.2351 + void append(int index, LIR_Op* op); 1.2352 + 1.2353 + // instruction 1.2354 + void move(int index, LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(index, new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); } 1.2355 +}; 1.2356 + 1.2357 + 1.2358 +//
1.2359 +// LIR_OpVisitState is used for manipulating LIR_Ops in an abstract way. 1.2360 +// Calling a LIR_Op's visit function with a LIR_OpVisitState causes 1.2361 +// information about the input, output and temporaries used by the 1.2362 +// op to be recorded. It also records whether the op has call semantics 1.2363 +// and also records all the CodeEmitInfos used by this op. 1.2364 +// 1.2365 + 1.2366 + 1.2367 +class LIR_OpVisitState: public StackObj { 1.2368 + public: 1.2369 + typedef enum { inputMode, firstMode = inputMode, tempMode, outputMode, numModes, invalidMode = -1 } OprMode; 1.2370 + 1.2371 + enum { 1.2372 + maxNumberOfOperands = 20, 1.2373 + maxNumberOfInfos = 4 1.2374 + }; 1.2375 + 1.2376 + private: 1.2377 + LIR_Op* _op; 1.2378 + 1.2379 + // optimization: the operands and infos are not stored in a variable-length 1.2380 + // list, but in a fixed-size array to save time of size checks and resizing 1.2381 + int _oprs_len[numModes]; 1.2382 + LIR_Opr* _oprs_new[numModes][maxNumberOfOperands]; 1.2383 + int _info_len; 1.2384 + CodeEmitInfo* _info_new[maxNumberOfInfos]; 1.2385 + 1.2386 + bool _has_call; 1.2387 + bool _has_slow_case; 1.2388 + 1.2389 + 1.2390 + // only include register operands 1.2391 + // addresses are decomposed to the base and index registers 1.2392 + // constants and stack operands are ignored 1.2393 + void append(LIR_Opr& opr, OprMode mode) { 1.2394 + assert(opr->is_valid(), "should not call this otherwise"); 1.2395 + assert(mode >= 0 && mode < numModes, "bad mode"); 1.2396 + 1.2397 + if (opr->is_register()) { 1.2398 + assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow"); 1.2399 + _oprs_new[mode][_oprs_len[mode]++] = &opr; 1.2400 + 1.2401 + } else if (opr->is_pointer()) { 1.2402 + LIR_Address* address = opr->as_address_ptr(); 1.2403 + if (address != NULL) { 1.2404 + // special handling for addresses: add base and index register of the address 1.2405 + // both are always input operands or temp if we want to extend 1.2406 + // 
their liveness! 1.2407 + if (mode == outputMode) { 1.2408 + mode = inputMode; 1.2409 + } 1.2410 + assert (mode == inputMode || mode == tempMode, "input or temp only for addresses"); 1.2411 + if (address->_base->is_valid()) { 1.2412 + assert(address->_base->is_register(), "must be"); 1.2413 + assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow"); 1.2414 + _oprs_new[mode][_oprs_len[mode]++] = &address->_base; 1.2415 + } 1.2416 + if (address->_index->is_valid()) { 1.2417 + assert(address->_index->is_register(), "must be"); 1.2418 + assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow"); 1.2419 + _oprs_new[mode][_oprs_len[mode]++] = &address->_index; 1.2420 + } 1.2421 + 1.2422 + } else { 1.2423 + assert(opr->is_constant(), "constant operands are not processed"); 1.2424 + } 1.2425 + } else { 1.2426 + assert(opr->is_stack(), "stack operands are not processed"); 1.2427 + } 1.2428 + } 1.2429 + 1.2430 + void append(CodeEmitInfo* info) { 1.2431 + assert(info != NULL, "should not call this otherwise"); 1.2432 + assert(_info_len < maxNumberOfInfos, "array overflow"); 1.2433 + _info_new[_info_len++] = info; 1.2434 + } 1.2435 + 1.2436 + public: 1.2437 + LIR_OpVisitState() { reset(); } 1.2438 + 1.2439 + LIR_Op* op() const { return _op; } 1.2440 + void set_op(LIR_Op* op) { reset(); _op = op; } 1.2441 + 1.2442 + bool has_call() const { return _has_call; } 1.2443 + bool has_slow_case() const { return _has_slow_case; } 1.2444 + 1.2445 + void reset() { 1.2446 + _op = NULL; 1.2447 + _has_call = false; 1.2448 + _has_slow_case = false; 1.2449 + 1.2450 + _oprs_len[inputMode] = 0; 1.2451 + _oprs_len[tempMode] = 0; 1.2452 + _oprs_len[outputMode] = 0; 1.2453 + _info_len = 0; 1.2454 + } 1.2455 + 1.2456 + 1.2457 + int opr_count(OprMode mode) const { 1.2458 + assert(mode >= 0 && mode < numModes, "bad mode"); 1.2459 + return _oprs_len[mode]; 1.2460 + } 1.2461 + 1.2462 + LIR_Opr opr_at(OprMode mode, int index) const { 1.2463 + assert(mode >= 0 && mode < numModes, "bad mode"); 
1.2464 + assert(index >= 0 && index < _oprs_len[mode], "index out of bound"); 1.2465 + return *_oprs_new[mode][index]; 1.2466 + } 1.2467 + 1.2468 + void set_opr_at(OprMode mode, int index, LIR_Opr opr) const { 1.2469 + assert(mode >= 0 && mode < numModes, "bad mode"); 1.2470 + assert(index >= 0 && index < _oprs_len[mode], "index out of bound"); 1.2471 + *_oprs_new[mode][index] = opr; 1.2472 + } 1.2473 + 1.2474 + int info_count() const { 1.2475 + return _info_len; 1.2476 + } 1.2477 + 1.2478 + CodeEmitInfo* info_at(int index) const { 1.2479 + assert(index < _info_len, "index out of bounds"); 1.2480 + return _info_new[index]; 1.2481 + } 1.2482 + 1.2483 + XHandlers* all_xhandler(); 1.2484 + 1.2485 + // collects all register operands of the instruction 1.2486 + void visit(LIR_Op* op); 1.2487 + 1.2488 +#ifdef ASSERT 1.2489 + // check that an operation has no operands 1.2490 + bool no_operands(LIR_Op* op); 1.2491 +#endif 1.2492 + 1.2493 + // LIR_Op visitor functions use these to fill in the state 1.2494 + void do_input(LIR_Opr& opr) { append(opr, LIR_OpVisitState::inputMode); } 1.2495 + void do_output(LIR_Opr& opr) { append(opr, LIR_OpVisitState::outputMode); } 1.2496 + void do_temp(LIR_Opr& opr) { append(opr, LIR_OpVisitState::tempMode); } 1.2497 + void do_info(CodeEmitInfo* info) { append(info); } 1.2498 + 1.2499 + void do_stub(CodeStub* stub); 1.2500 + void do_call() { _has_call = true; } 1.2501 + void do_slow_case() { _has_slow_case = true; } 1.2502 + void do_slow_case(CodeEmitInfo* info) { 1.2503 + _has_slow_case = true; 1.2504 + append(info); 1.2505 + } 1.2506 +}; 1.2507 + 1.2508 + 1.2509 +inline LIR_Opr LIR_OprDesc::illegalOpr() { return LIR_OprFact::illegalOpr; }; 1.2510 + 1.2511 +#endif // SHARE_VM_C1_C1_LIR_HPP