Fri, 07 Jan 2011 10:42:32 -0500
7003271: Hotspot should track cumulative Java heap bytes allocated on a per-thread basis
Summary: Track allocated bytes in Threads, update on TLAB retirement and direct allocation in Eden and tenured, add JNI methods for ThreadMXBean.
Reviewed-by: coleenp, kvn, dholmes, ysr
duke@435 | 1 | /* |
iveresov@2138 | 2 | * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "c1/c1_Compilation.hpp" |
stefank@2314 | 27 | #include "c1/c1_FrameMap.hpp" |
stefank@2314 | 28 | #include "c1/c1_Instruction.hpp" |
stefank@2314 | 29 | #include "c1/c1_LIRAssembler.hpp" |
stefank@2314 | 30 | #include "c1/c1_LIRGenerator.hpp" |
stefank@2314 | 31 | #include "c1/c1_Runtime1.hpp" |
stefank@2314 | 32 | #include "c1/c1_ValueStack.hpp" |
stefank@2314 | 33 | #include "ci/ciArray.hpp" |
stefank@2314 | 34 | #include "ci/ciObjArrayKlass.hpp" |
stefank@2314 | 35 | #include "ci/ciTypeArrayKlass.hpp" |
stefank@2314 | 36 | #include "runtime/sharedRuntime.hpp" |
stefank@2314 | 37 | #include "runtime/stubRoutines.hpp" |
stefank@2314 | 38 | #include "vmreg_x86.inline.hpp" |
duke@435 | 39 | |
// Shorthand for appending LIR instructions to the generator's LIR list.
// In debug builds each appended instruction also records the C++ source
// file/line that created it, which helps when debugging LIR output.
#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
duke@435 | 45 | |
duke@435 | 46 | // Item will be loaded into a byte register; Intel only |
duke@435 | 47 | void LIRItem::load_byte_item() { |
duke@435 | 48 | load_item(); |
duke@435 | 49 | LIR_Opr res = result(); |
duke@435 | 50 | |
duke@435 | 51 | if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) { |
duke@435 | 52 | // make sure that it is a byte register |
duke@435 | 53 | assert(!value()->type()->is_float() && !value()->type()->is_double(), |
duke@435 | 54 | "can't load floats in byte register"); |
duke@435 | 55 | LIR_Opr reg = _gen->rlock_byte(T_BYTE); |
duke@435 | 56 | __ move(res, reg); |
duke@435 | 57 | |
duke@435 | 58 | _result = reg; |
duke@435 | 59 | } |
duke@435 | 60 | } |
duke@435 | 61 | |
duke@435 | 62 | |
duke@435 | 63 | void LIRItem::load_nonconstant() { |
duke@435 | 64 | LIR_Opr r = value()->operand(); |
duke@435 | 65 | if (r->is_constant()) { |
duke@435 | 66 | _result = r; |
duke@435 | 67 | } else { |
duke@435 | 68 | load_item(); |
duke@435 | 69 | } |
duke@435 | 70 | } |
duke@435 | 71 | |
duke@435 | 72 | //-------------------------------------------------------------- |
duke@435 | 73 | // LIRGenerator |
duke@435 | 74 | //-------------------------------------------------------------- |
duke@435 | 75 | |
duke@435 | 76 | |
// Operands pinned to fixed x86 registers: the exception oop travels in rax
// and the exception pc in rdx; integer divide takes its input and produces
// its quotient in rax with the remainder in rdx; variable shift counts must
// be in rcx.  getThreadTemp is illegal here (no temp needed on x86).
LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::rax_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::rdx_opr; }
LIR_Opr LIRGenerator::divInOpr()        { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::divOutOpr()       { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::remOutOpr()       { return FrameMap::rdx_opr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { return FrameMap::rcx_opr; }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::rax_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }
duke@435 | 85 | |
duke@435 | 86 | |
duke@435 | 87 | LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) { |
duke@435 | 88 | LIR_Opr opr; |
duke@435 | 89 | switch (type->tag()) { |
duke@435 | 90 | case intTag: opr = FrameMap::rax_opr; break; |
duke@435 | 91 | case objectTag: opr = FrameMap::rax_oop_opr; break; |
never@739 | 92 | case longTag: opr = FrameMap::long0_opr; break; |
duke@435 | 93 | case floatTag: opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr : FrameMap::fpu0_float_opr; break; |
duke@435 | 94 | case doubleTag: opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr; break; |
duke@435 | 95 | |
duke@435 | 96 | case addressTag: |
duke@435 | 97 | default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr; |
duke@435 | 98 | } |
duke@435 | 99 | |
duke@435 | 100 | assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch"); |
duke@435 | 101 | return opr; |
duke@435 | 102 | } |
duke@435 | 103 | |
duke@435 | 104 | |
duke@435 | 105 | LIR_Opr LIRGenerator::rlock_byte(BasicType type) { |
duke@435 | 106 | LIR_Opr reg = new_register(T_INT); |
duke@435 | 107 | set_vreg_flag(reg, LIRGenerator::byte_reg); |
duke@435 | 108 | return reg; |
duke@435 | 109 | } |
duke@435 | 110 | |
duke@435 | 111 | |
duke@435 | 112 | //--------- loading items into registers -------------------------------- |
duke@435 | 113 | |
duke@435 | 114 | |
duke@435 | 115 | // i486 instructions can inline constants |
duke@435 | 116 | bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const { |
duke@435 | 117 | if (type == T_SHORT || type == T_CHAR) { |
duke@435 | 118 | // there is no immediate move of word values in asembler_i486.?pp |
duke@435 | 119 | return false; |
duke@435 | 120 | } |
duke@435 | 121 | Constant* c = v->as_Constant(); |
roland@2174 | 122 | if (c && c->state_before() == NULL) { |
duke@435 | 123 | // constants of any type can be stored directly, except for |
duke@435 | 124 | // unloaded object constants. |
duke@435 | 125 | return true; |
duke@435 | 126 | } |
duke@435 | 127 | return false; |
duke@435 | 128 | } |
duke@435 | 129 | |
duke@435 | 130 | |
duke@435 | 131 | bool LIRGenerator::can_inline_as_constant(Value v) const { |
never@739 | 132 | if (v->type()->tag() == longTag) return false; |
duke@435 | 133 | return v->type()->tag() != objectTag || |
duke@435 | 134 | (v->type()->is_constant() && v->type()->as_ObjectType()->constant_value()->is_null_object()); |
duke@435 | 135 | } |
duke@435 | 136 | |
duke@435 | 137 | |
duke@435 | 138 | bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { |
never@739 | 139 | if (c->type() == T_LONG) return false; |
duke@435 | 140 | return c->type() != T_OBJECT || c->as_jobject() == NULL; |
duke@435 | 141 | } |
duke@435 | 142 | |
duke@435 | 143 | |
// No register is needed for safepoint polling on x86 (illegalOpr signals
// that the poll does not consume a register operand).
LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}
duke@435 | 147 | |
duke@435 | 148 | |
duke@435 | 149 | LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index, |
duke@435 | 150 | int shift, int disp, BasicType type) { |
duke@435 | 151 | assert(base->is_register(), "must be"); |
duke@435 | 152 | if (index->is_constant()) { |
duke@435 | 153 | return new LIR_Address(base, |
duke@435 | 154 | (index->as_constant_ptr()->as_jint() << shift) + disp, |
duke@435 | 155 | type); |
duke@435 | 156 | } else { |
duke@435 | 157 | return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type); |
duke@435 | 158 | } |
duke@435 | 159 | } |
duke@435 | 160 | |
duke@435 | 161 | |
duke@435 | 162 | LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, |
duke@435 | 163 | BasicType type, bool needs_card_mark) { |
duke@435 | 164 | int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type); |
duke@435 | 165 | |
duke@435 | 166 | LIR_Address* addr; |
duke@435 | 167 | if (index_opr->is_constant()) { |
kvn@464 | 168 | int elem_size = type2aelembytes(type); |
duke@435 | 169 | addr = new LIR_Address(array_opr, |
duke@435 | 170 | offset_in_bytes + index_opr->as_jint() * elem_size, type); |
duke@435 | 171 | } else { |
never@739 | 172 | #ifdef _LP64 |
never@739 | 173 | if (index_opr->type() == T_INT) { |
never@739 | 174 | LIR_Opr tmp = new_register(T_LONG); |
never@739 | 175 | __ convert(Bytecodes::_i2l, index_opr, tmp); |
never@739 | 176 | index_opr = tmp; |
never@739 | 177 | } |
never@739 | 178 | #endif // _LP64 |
duke@435 | 179 | addr = new LIR_Address(array_opr, |
duke@435 | 180 | index_opr, |
duke@435 | 181 | LIR_Address::scale(type), |
duke@435 | 182 | offset_in_bytes, type); |
duke@435 | 183 | } |
duke@435 | 184 | if (needs_card_mark) { |
duke@435 | 185 | // This store will need a precise card mark, so go ahead and |
duke@435 | 186 | // compute the full adddres instead of computing once for the |
duke@435 | 187 | // store and again for the card mark. |
never@739 | 188 | LIR_Opr tmp = new_pointer_register(); |
duke@435 | 189 | __ leal(LIR_OprFact::address(addr), tmp); |
iveresov@1927 | 190 | return new LIR_Address(tmp, type); |
duke@435 | 191 | } else { |
duke@435 | 192 | return addr; |
duke@435 | 193 | } |
duke@435 | 194 | } |
duke@435 | 195 | |
duke@435 | 196 | |
iveresov@2138 | 197 | LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) { |
iveresov@2138 | 198 | LIR_Opr r; |
iveresov@2138 | 199 | if (type == T_LONG) { |
iveresov@2138 | 200 | r = LIR_OprFact::longConst(x); |
iveresov@2138 | 201 | } else if (type == T_INT) { |
iveresov@2138 | 202 | r = LIR_OprFact::intConst(x); |
iveresov@2138 | 203 | } else { |
iveresov@2138 | 204 | ShouldNotReachHere(); |
iveresov@2138 | 205 | } |
iveresov@2138 | 206 | return r; |
iveresov@2138 | 207 | } |
iveresov@2138 | 208 | |
iveresov@2138 | 209 | void LIRGenerator::increment_counter(address counter, BasicType type, int step) { |
never@739 | 210 | LIR_Opr pointer = new_pointer_register(); |
never@739 | 211 | __ move(LIR_OprFact::intptrConst(counter), pointer); |
iveresov@2138 | 212 | LIR_Address* addr = new LIR_Address(pointer, type); |
duke@435 | 213 | increment_counter(addr, step); |
duke@435 | 214 | } |
duke@435 | 215 | |
duke@435 | 216 | |
// Emits an in-memory add of 'step' to the counter at 'addr'.
void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  __ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr);
}
duke@435 | 220 | |
// Compares the memory word at base+disp against the immediate c.
void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  __ cmp_mem_int(condition, base, disp, c, info);
}
duke@435 | 224 | |
duke@435 | 225 | |
// Compares register reg against the memory operand at base+disp.
void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
}
duke@435 | 229 | |
duke@435 | 230 | |
// Same as above, but with the displacement given as an operand.
void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, LIR_Opr disp, BasicType type, CodeEmitInfo* info) {
  __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
}
duke@435 | 234 | |
duke@435 | 235 | |
duke@435 | 236 | bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) { |
duke@435 | 237 | if (tmp->is_valid()) { |
duke@435 | 238 | if (is_power_of_2(c + 1)) { |
duke@435 | 239 | __ move(left, tmp); |
duke@435 | 240 | __ shift_left(left, log2_intptr(c + 1), left); |
duke@435 | 241 | __ sub(left, tmp, result); |
duke@435 | 242 | return true; |
duke@435 | 243 | } else if (is_power_of_2(c - 1)) { |
duke@435 | 244 | __ move(left, tmp); |
duke@435 | 245 | __ shift_left(left, log2_intptr(c - 1), left); |
duke@435 | 246 | __ add(left, tmp, result); |
duke@435 | 247 | return true; |
duke@435 | 248 | } |
duke@435 | 249 | } |
duke@435 | 250 | return false; |
duke@435 | 251 | } |
duke@435 | 252 | |
duke@435 | 253 | |
duke@435 | 254 | void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) { |
duke@435 | 255 | BasicType type = item->type(); |
duke@435 | 256 | __ store(item, new LIR_Address(FrameMap::rsp_opr, in_bytes(offset_from_sp), type)); |
duke@435 | 257 | } |
duke@435 | 258 | |
duke@435 | 259 | //---------------------------------------------------------------------- |
duke@435 | 260 | // visitor functions |
duke@435 | 261 | //---------------------------------------------------------------------- |
duke@435 | 262 | |
duke@435 | 263 | |
// Generates LIR for an indexed array store (*astore bytecodes): null and
// range checks, an array store check for object element stores, the store
// itself, and GC write barriers around oop stores.
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool needs_range_check = true;
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  // storing a provably-null constant into an object array never needs a
  // store check
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length) {
    // the array length is available as its own value; the range check may
    // be provably redundant, in which case length stays unloaded
    needs_range_check = x->compute_needs_range_check();
    if (needs_range_check) {
      length.set_instruction(x->length());
      length.load_item();
    }
  }
  if (needs_store_check) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // emit array address setup early so it schedules better
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      // compare the index directly against the already-loaded length
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = new_register(objectType);

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info);
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), false, NULL);
    __ move(value.result(), array_addr, null_check_info);
    // Seems to be a precise
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  } else {
    __ move(value.result(), array_addr, null_check_info);
  }
}
duke@435 | 337 | |
duke@435 | 338 | |
// Generates LIR for monitorenter: locks x's object into monitor slot
// x->monitor_no(), with an optional implicit null check on the object.
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking on x86
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }

  // debug info for the null-check path (only when a check is needed)
  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info);
}
duke@435 | 364 | |
duke@435 | 365 | |
// Generates LIR for monitorexit on monitor slot x->monitor_no().
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  // obj.result() is never passed to monitor_exit below (only fresh temps
  // are), so the object need not be loaded into a register here
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}
duke@435 | 377 | |
duke@435 | 378 | |
duke@435 | 379 | // _ineg, _lneg, _fneg, _dneg |
duke@435 | 380 | void LIRGenerator::do_NegateOp(NegateOp* x) { |
duke@435 | 381 | LIRItem value(x->x(), this); |
duke@435 | 382 | value.set_destroys_register(); |
duke@435 | 383 | value.load_item(); |
duke@435 | 384 | LIR_Opr reg = rlock(x); |
duke@435 | 385 | __ negate(value.result(), reg); |
duke@435 | 386 | |
duke@435 | 387 | set_result(x, round_item(reg)); |
duke@435 | 388 | } |
duke@435 | 389 | |
duke@435 | 390 | |
duke@435 | 391 | // for _fadd, _fmul, _fsub, _fdiv, _frem |
duke@435 | 392 | // _dadd, _dmul, _dsub, _ddiv, _drem |
duke@435 | 393 | void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) { |
duke@435 | 394 | LIRItem left(x->x(), this); |
duke@435 | 395 | LIRItem right(x->y(), this); |
duke@435 | 396 | LIRItem* left_arg = &left; |
duke@435 | 397 | LIRItem* right_arg = &right; |
duke@435 | 398 | assert(!left.is_stack() || !right.is_stack(), "can't both be memory operands"); |
duke@435 | 399 | bool must_load_both = (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem); |
duke@435 | 400 | if (left.is_register() || x->x()->type()->is_constant() || must_load_both) { |
duke@435 | 401 | left.load_item(); |
duke@435 | 402 | } else { |
duke@435 | 403 | left.dont_load_item(); |
duke@435 | 404 | } |
duke@435 | 405 | |
duke@435 | 406 | // do not load right operand if it is a constant. only 0 and 1 are |
duke@435 | 407 | // loaded because there are special instructions for loading them |
duke@435 | 408 | // without memory access (not needed for SSE2 instructions) |
duke@435 | 409 | bool must_load_right = false; |
duke@435 | 410 | if (right.is_constant()) { |
duke@435 | 411 | LIR_Const* c = right.result()->as_constant_ptr(); |
duke@435 | 412 | assert(c != NULL, "invalid constant"); |
duke@435 | 413 | assert(c->type() == T_FLOAT || c->type() == T_DOUBLE, "invalid type"); |
duke@435 | 414 | |
duke@435 | 415 | if (c->type() == T_FLOAT) { |
duke@435 | 416 | must_load_right = UseSSE < 1 && (c->is_one_float() || c->is_zero_float()); |
duke@435 | 417 | } else { |
duke@435 | 418 | must_load_right = UseSSE < 2 && (c->is_one_double() || c->is_zero_double()); |
duke@435 | 419 | } |
duke@435 | 420 | } |
duke@435 | 421 | |
duke@435 | 422 | if (must_load_both) { |
duke@435 | 423 | // frem and drem destroy also right operand, so move it to a new register |
duke@435 | 424 | right.set_destroys_register(); |
duke@435 | 425 | right.load_item(); |
duke@435 | 426 | } else if (right.is_register() || must_load_right) { |
duke@435 | 427 | right.load_item(); |
duke@435 | 428 | } else { |
duke@435 | 429 | right.dont_load_item(); |
duke@435 | 430 | } |
duke@435 | 431 | LIR_Opr reg = rlock(x); |
duke@435 | 432 | LIR_Opr tmp = LIR_OprFact::illegalOpr; |
duke@435 | 433 | if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) { |
duke@435 | 434 | tmp = new_register(T_DOUBLE); |
duke@435 | 435 | } |
duke@435 | 436 | |
duke@435 | 437 | if ((UseSSE >= 1 && x->op() == Bytecodes::_frem) || (UseSSE >= 2 && x->op() == Bytecodes::_drem)) { |
duke@435 | 438 | // special handling for frem and drem: no SSE instruction, so must use FPU with temporary fpu stack slots |
duke@435 | 439 | LIR_Opr fpu0, fpu1; |
duke@435 | 440 | if (x->op() == Bytecodes::_frem) { |
duke@435 | 441 | fpu0 = LIR_OprFact::single_fpu(0); |
duke@435 | 442 | fpu1 = LIR_OprFact::single_fpu(1); |
duke@435 | 443 | } else { |
duke@435 | 444 | fpu0 = LIR_OprFact::double_fpu(0); |
duke@435 | 445 | fpu1 = LIR_OprFact::double_fpu(1); |
duke@435 | 446 | } |
duke@435 | 447 | __ move(right.result(), fpu1); // order of left and right operand is important! |
duke@435 | 448 | __ move(left.result(), fpu0); |
duke@435 | 449 | __ rem (fpu0, fpu1, fpu0); |
duke@435 | 450 | __ move(fpu0, reg); |
duke@435 | 451 | |
duke@435 | 452 | } else { |
duke@435 | 453 | arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), x->is_strictfp(), tmp); |
duke@435 | 454 | } |
duke@435 | 455 | |
duke@435 | 456 | set_result(x, round_item(reg)); |
duke@435 | 457 | } |
duke@435 | 458 | |
duke@435 | 459 | |
// for _ladd, _lmul, _lsub, _ldiv, _lrem
//
// ldiv/lrem are implemented as runtime calls with an explicit zero check
// of the divisor; lmul uses the fixed long0 register pair; add/sub go
// through the generic long arithmetic emitter.
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem ) {
    // long division is implemented as a direct call into the runtime
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    // the check for division by zero destroys the right operand
    right.set_destroys_register();

    // C calling convention for two long arguments
    BasicTypeList signature(2);
    signature.append(T_LONG);
    signature.append(T_LONG);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    // check for division by zero (destroys registers of right operand!)
    CodeEmitInfo* info = state_for(x);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    // explicit divisor-zero check before the runtime call
    __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));

    address entry;
    switch (x->op()) {
    case Bytecodes::_lrem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
      break; // check if dividend is 0 is done elsewhere
    case Bytecodes::_ldiv:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
      break; // check if dividend is 0 is done elsewhere
    case Bytecodes::_lmul:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul);
      break;
    default:
      ShouldNotReachHere();
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);
  } else if (x->op() == Bytecodes::_lmul) {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    // right register is destroyed by the long mul, so it must be
    // copied to a new register.
    right.set_destroys_register();

    left.load_item();
    right.load_item();

    // long multiply produces its result in the fixed long0 register pair
    LIR_Opr reg = FrameMap::long0_opr;
    arithmetic_op_long(x->op(), reg, left.result(), right.result(), NULL);
    LIR_Opr result = rlock_result(x);
    __ move(reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);

    left.load_item();
    // don't load constants to save register
    right.load_nonconstant();
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}
duke@435 | 533 | |
duke@435 | 534 | |
duke@435 | 535 | |
// for: _iadd, _imul, _isub, _idiv, _irem
//
// idiv/irem use the x86 fixed-register idiom (dividend in rax, result in
// rax or rdx); imul by a suitable constant is strength-reduced to shifts
// in arithmetic_op_int.
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
    // The requirements for division and modulo
    // input : rax,: dividend                         min_int
    //         reg: divisor   (may not be rax,/rdx)   -1
    //
    // output: rax,: quotient  (= rax, idiv reg)       min_int
    //         rdx: remainder (= rax, irem reg)       0

    // rax, and rdx will be destroyed

    // Note: does this invalidate the spec ???
    LIRItem right(x->y(), this);
    LIRItem left(x->x() , this);   // visit left second, so that the is_register test is valid

    // call state_for before load_item_force because state_for may
    // force the evaluation of other instructions that are needed for
    // correct debug info.  Otherwise the live range of the fix
    // register might be too long.
    CodeEmitInfo* info = state_for(x);

    left.load_item_force(divInOpr());

    right.load_item();

    LIR_Opr result = rlock_result(x);
    LIR_Opr result_reg;
    if (x->op() == Bytecodes::_idiv) {
      result_reg = divOutOpr();
    } else {
      result_reg = remOutOpr();
    }

    if (!ImplicitDiv0Checks) {
      // explicit divide-by-zero check when implicit checks are disabled
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
    }
    LIR_Opr tmp = FrameMap::rdx_opr; // idiv and irem use rdx in their implementation
    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), result_reg, tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), result_reg, tmp, info);
    } else {
      ShouldNotReachHere();
    }

    __ move(result_reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);
    LIRItem* left_arg = &left;
    LIRItem* right_arg = &right;
    if (x->is_commutative() && left.is_stack() && right.is_register()) {
      // swap them if left is real stack (or cached) and right is real register(not cached)
      left_arg = &right;
      right_arg = &left;
    }

    left_arg->load_item();

    // do not need to load right, as we can handle stack and constants
    if (x->op() == Bytecodes::_imul ) {
      // check if we can use shift instead
      bool use_constant = false;
      bool use_tmp = false;
      if (right_arg->is_constant()) {
        int iconst = right_arg->get_jint_constant();
        if (iconst > 0) {
          if (is_power_of_2(iconst)) {
            use_constant = true;   // single shift
          } else if (is_power_of_2(iconst - 1) || is_power_of_2(iconst + 1)) {
            use_constant = true;   // shift plus add/sub; needs a temp
            use_tmp = true;
          }
        }
      }
      if (use_constant) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      if (use_tmp) {
        tmp = new_register(T_INT);
      }
      rlock_result(x);

      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    } else {
      right_arg->dont_load_item();
      rlock_result(x);
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    }
  }
}
duke@435 | 634 | |
duke@435 | 635 | |
duke@435 | 636 | void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) { |
duke@435 | 637 | // when an operand with use count 1 is the left operand, then it is |
duke@435 | 638 | // likely that no move for 2-operand-LIR-form is necessary |
duke@435 | 639 | if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) { |
duke@435 | 640 | x->swap_operands(); |
duke@435 | 641 | } |
duke@435 | 642 | |
duke@435 | 643 | ValueTag tag = x->type()->tag(); |
duke@435 | 644 | assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters"); |
duke@435 | 645 | switch (tag) { |
duke@435 | 646 | case floatTag: |
duke@435 | 647 | case doubleTag: do_ArithmeticOp_FPU(x); return; |
duke@435 | 648 | case longTag: do_ArithmeticOp_Long(x); return; |
duke@435 | 649 | case intTag: do_ArithmeticOp_Int(x); return; |
duke@435 | 650 | } |
duke@435 | 651 | ShouldNotReachHere(); |
duke@435 | 652 | } |
duke@435 | 653 | |
duke@435 | 654 | |
duke@435 | 655 | // _ishl, _lshl, _ishr, _lshr, _iushr, _lushr |
duke@435 | 656 | void LIRGenerator::do_ShiftOp(ShiftOp* x) { |
duke@435 | 657 | // count must always be in rcx |
duke@435 | 658 | LIRItem value(x->x(), this); |
duke@435 | 659 | LIRItem count(x->y(), this); |
duke@435 | 660 | |
duke@435 | 661 | ValueTag elemType = x->type()->tag(); |
duke@435 | 662 | bool must_load_count = !count.is_constant() || elemType == longTag; |
duke@435 | 663 | if (must_load_count) { |
duke@435 | 664 | // count for long must be in register |
duke@435 | 665 | count.load_item_force(shiftCountOpr()); |
duke@435 | 666 | } else { |
duke@435 | 667 | count.dont_load_item(); |
duke@435 | 668 | } |
duke@435 | 669 | value.load_item(); |
duke@435 | 670 | LIR_Opr reg = rlock_result(x); |
duke@435 | 671 | |
duke@435 | 672 | shift_op(x->op(), reg, value.result(), count.result(), LIR_OprFact::illegalOpr); |
duke@435 | 673 | } |
duke@435 | 674 | |
duke@435 | 675 | |
duke@435 | 676 | // _iand, _land, _ior, _lor, _ixor, _lxor |
duke@435 | 677 | void LIRGenerator::do_LogicOp(LogicOp* x) { |
duke@435 | 678 | // when an operand with use count 1 is the left operand, then it is |
duke@435 | 679 | // likely that no move for 2-operand-LIR-form is necessary |
duke@435 | 680 | if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) { |
duke@435 | 681 | x->swap_operands(); |
duke@435 | 682 | } |
duke@435 | 683 | |
duke@435 | 684 | LIRItem left(x->x(), this); |
duke@435 | 685 | LIRItem right(x->y(), this); |
duke@435 | 686 | |
duke@435 | 687 | left.load_item(); |
duke@435 | 688 | right.load_nonconstant(); |
duke@435 | 689 | LIR_Opr reg = rlock_result(x); |
duke@435 | 690 | |
duke@435 | 691 | logic_op(x->op(), reg, left.result(), right.result()); |
duke@435 | 692 | } |
duke@435 | 693 | |
duke@435 | 694 | |
duke@435 | 695 | |
duke@435 | 696 | // _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg |
duke@435 | 697 | void LIRGenerator::do_CompareOp(CompareOp* x) { |
duke@435 | 698 | LIRItem left(x->x(), this); |
duke@435 | 699 | LIRItem right(x->y(), this); |
duke@435 | 700 | ValueTag tag = x->x()->type()->tag(); |
duke@435 | 701 | if (tag == longTag) { |
duke@435 | 702 | left.set_destroys_register(); |
duke@435 | 703 | } |
duke@435 | 704 | left.load_item(); |
duke@435 | 705 | right.load_item(); |
duke@435 | 706 | LIR_Opr reg = rlock_result(x); |
duke@435 | 707 | |
duke@435 | 708 | if (x->x()->type()->is_float_kind()) { |
duke@435 | 709 | Bytecodes::Code code = x->op(); |
duke@435 | 710 | __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl)); |
duke@435 | 711 | } else if (x->x()->type()->tag() == longTag) { |
duke@435 | 712 | __ lcmp2int(left.result(), right.result(), reg); |
duke@435 | 713 | } else { |
duke@435 | 714 | Unimplemented(); |
duke@435 | 715 | } |
duke@435 | 716 | } |
duke@435 | 717 | |
duke@435 | 718 | |
duke@435 | 719 | void LIRGenerator::do_AttemptUpdate(Intrinsic* x) { |
duke@435 | 720 | assert(x->number_of_arguments() == 3, "wrong type"); |
duke@435 | 721 | LIRItem obj (x->argument_at(0), this); // AtomicLong object |
duke@435 | 722 | LIRItem cmp_value (x->argument_at(1), this); // value to compare with field |
duke@435 | 723 | LIRItem new_value (x->argument_at(2), this); // replace field with new_value if it matches cmp_value |
duke@435 | 724 | |
duke@435 | 725 | // compare value must be in rdx,eax (hi,lo); may be destroyed by cmpxchg8 instruction |
never@739 | 726 | cmp_value.load_item_force(FrameMap::long0_opr); |
duke@435 | 727 | |
duke@435 | 728 | // new value must be in rcx,ebx (hi,lo) |
never@739 | 729 | new_value.load_item_force(FrameMap::long1_opr); |
duke@435 | 730 | |
duke@435 | 731 | // object pointer register is overwritten with field address |
duke@435 | 732 | obj.load_item(); |
duke@435 | 733 | |
duke@435 | 734 | // generate compare-and-swap; produces zero condition if swap occurs |
duke@435 | 735 | int value_offset = sun_misc_AtomicLongCSImpl::value_offset(); |
duke@435 | 736 | LIR_Opr addr = obj.result(); |
duke@435 | 737 | __ add(addr, LIR_OprFact::intConst(value_offset), addr); |
duke@435 | 738 | LIR_Opr t1 = LIR_OprFact::illegalOpr; // no temp needed |
duke@435 | 739 | LIR_Opr t2 = LIR_OprFact::illegalOpr; // no temp needed |
duke@435 | 740 | __ cas_long(addr, cmp_value.result(), new_value.result(), t1, t2); |
duke@435 | 741 | |
duke@435 | 742 | // generate conditional move of boolean result |
duke@435 | 743 | LIR_Opr result = rlock_result(x); |
duke@435 | 744 | __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result); |
duke@435 | 745 | } |
duke@435 | 746 | |
duke@435 | 747 | |
duke@435 | 748 | void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) { |
duke@435 | 749 | assert(x->number_of_arguments() == 4, "wrong type"); |
duke@435 | 750 | LIRItem obj (x->argument_at(0), this); // object |
duke@435 | 751 | LIRItem offset(x->argument_at(1), this); // offset of field |
duke@435 | 752 | LIRItem cmp (x->argument_at(2), this); // value to compare with field |
duke@435 | 753 | LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp |
duke@435 | 754 | |
duke@435 | 755 | assert(obj.type()->tag() == objectTag, "invalid type"); |
never@739 | 756 | |
never@739 | 757 | // In 64bit the type can be long, sparc doesn't have this assert |
never@739 | 758 | // assert(offset.type()->tag() == intTag, "invalid type"); |
never@739 | 759 | |
duke@435 | 760 | assert(cmp.type()->tag() == type->tag(), "invalid type"); |
duke@435 | 761 | assert(val.type()->tag() == type->tag(), "invalid type"); |
duke@435 | 762 | |
duke@435 | 763 | // get address of field |
duke@435 | 764 | obj.load_item(); |
duke@435 | 765 | offset.load_nonconstant(); |
duke@435 | 766 | |
duke@435 | 767 | if (type == objectType) { |
duke@435 | 768 | cmp.load_item_force(FrameMap::rax_oop_opr); |
duke@435 | 769 | val.load_item(); |
duke@435 | 770 | } else if (type == intType) { |
duke@435 | 771 | cmp.load_item_force(FrameMap::rax_opr); |
duke@435 | 772 | val.load_item(); |
duke@435 | 773 | } else if (type == longType) { |
never@739 | 774 | cmp.load_item_force(FrameMap::long0_opr); |
never@739 | 775 | val.load_item_force(FrameMap::long1_opr); |
duke@435 | 776 | } else { |
duke@435 | 777 | ShouldNotReachHere(); |
duke@435 | 778 | } |
duke@435 | 779 | |
never@2228 | 780 | LIR_Opr addr = new_pointer_register(); |
roland@1495 | 781 | LIR_Address* a; |
roland@1495 | 782 | if(offset.result()->is_constant()) { |
roland@1495 | 783 | a = new LIR_Address(obj.result(), |
roland@1495 | 784 | NOT_LP64(offset.result()->as_constant_ptr()->as_jint()) LP64_ONLY((int)offset.result()->as_constant_ptr()->as_jlong()), |
roland@1495 | 785 | as_BasicType(type)); |
roland@1495 | 786 | } else { |
roland@1495 | 787 | a = new LIR_Address(obj.result(), |
roland@1495 | 788 | offset.result(), |
roland@1495 | 789 | LIR_Address::times_1, |
roland@1495 | 790 | 0, |
roland@1495 | 791 | as_BasicType(type)); |
roland@1495 | 792 | } |
roland@1495 | 793 | __ leal(LIR_OprFact::address(a), addr); |
duke@435 | 794 | |
ysr@777 | 795 | if (type == objectType) { // Write-barrier needed for Object fields. |
ysr@777 | 796 | // Do the pre-write barrier, if any. |
ysr@777 | 797 | pre_barrier(addr, false, NULL); |
ysr@777 | 798 | } |
duke@435 | 799 | |
duke@435 | 800 | LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience |
duke@435 | 801 | if (type == objectType) |
duke@435 | 802 | __ cas_obj(addr, cmp.result(), val.result(), ill, ill); |
duke@435 | 803 | else if (type == intType) |
duke@435 | 804 | __ cas_int(addr, cmp.result(), val.result(), ill, ill); |
duke@435 | 805 | else if (type == longType) |
duke@435 | 806 | __ cas_long(addr, cmp.result(), val.result(), ill, ill); |
duke@435 | 807 | else { |
duke@435 | 808 | ShouldNotReachHere(); |
duke@435 | 809 | } |
duke@435 | 810 | |
duke@435 | 811 | // generate conditional move of boolean result |
duke@435 | 812 | LIR_Opr result = rlock_result(x); |
duke@435 | 813 | __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result); |
duke@435 | 814 | if (type == objectType) { // Write-barrier needed for Object fields. |
duke@435 | 815 | // Seems to be precise |
duke@435 | 816 | post_barrier(addr, val.result()); |
duke@435 | 817 | } |
duke@435 | 818 | } |
duke@435 | 819 | |
duke@435 | 820 | |
duke@435 | 821 | void LIRGenerator::do_MathIntrinsic(Intrinsic* x) { |
duke@435 | 822 | assert(x->number_of_arguments() == 1, "wrong type"); |
duke@435 | 823 | LIRItem value(x->argument_at(0), this); |
duke@435 | 824 | |
duke@435 | 825 | bool use_fpu = false; |
duke@435 | 826 | if (UseSSE >= 2) { |
duke@435 | 827 | switch(x->id()) { |
duke@435 | 828 | case vmIntrinsics::_dsin: |
duke@435 | 829 | case vmIntrinsics::_dcos: |
duke@435 | 830 | case vmIntrinsics::_dtan: |
duke@435 | 831 | case vmIntrinsics::_dlog: |
duke@435 | 832 | case vmIntrinsics::_dlog10: |
duke@435 | 833 | use_fpu = true; |
duke@435 | 834 | } |
duke@435 | 835 | } else { |
duke@435 | 836 | value.set_destroys_register(); |
duke@435 | 837 | } |
duke@435 | 838 | |
duke@435 | 839 | value.load_item(); |
duke@435 | 840 | |
duke@435 | 841 | LIR_Opr calc_input = value.result(); |
duke@435 | 842 | LIR_Opr calc_result = rlock_result(x); |
duke@435 | 843 | |
duke@435 | 844 | // sin and cos need two free fpu stack slots, so register two temporary operands |
duke@435 | 845 | LIR_Opr tmp1 = FrameMap::caller_save_fpu_reg_at(0); |
duke@435 | 846 | LIR_Opr tmp2 = FrameMap::caller_save_fpu_reg_at(1); |
duke@435 | 847 | |
duke@435 | 848 | if (use_fpu) { |
duke@435 | 849 | LIR_Opr tmp = FrameMap::fpu0_double_opr; |
duke@435 | 850 | __ move(calc_input, tmp); |
duke@435 | 851 | |
duke@435 | 852 | calc_input = tmp; |
duke@435 | 853 | calc_result = tmp; |
duke@435 | 854 | tmp1 = FrameMap::caller_save_fpu_reg_at(1); |
duke@435 | 855 | tmp2 = FrameMap::caller_save_fpu_reg_at(2); |
duke@435 | 856 | } |
duke@435 | 857 | |
duke@435 | 858 | switch(x->id()) { |
duke@435 | 859 | case vmIntrinsics::_dabs: __ abs (calc_input, calc_result, LIR_OprFact::illegalOpr); break; |
duke@435 | 860 | case vmIntrinsics::_dsqrt: __ sqrt (calc_input, calc_result, LIR_OprFact::illegalOpr); break; |
duke@435 | 861 | case vmIntrinsics::_dsin: __ sin (calc_input, calc_result, tmp1, tmp2); break; |
duke@435 | 862 | case vmIntrinsics::_dcos: __ cos (calc_input, calc_result, tmp1, tmp2); break; |
duke@435 | 863 | case vmIntrinsics::_dtan: __ tan (calc_input, calc_result, tmp1, tmp2); break; |
never@1388 | 864 | case vmIntrinsics::_dlog: __ log (calc_input, calc_result, tmp1); break; |
never@1388 | 865 | case vmIntrinsics::_dlog10: __ log10(calc_input, calc_result, tmp1); break; |
duke@435 | 866 | default: ShouldNotReachHere(); |
duke@435 | 867 | } |
duke@435 | 868 | |
duke@435 | 869 | if (use_fpu) { |
duke@435 | 870 | __ move(calc_result, x->operand()); |
duke@435 | 871 | } |
duke@435 | 872 | } |
duke@435 | 873 | |
duke@435 | 874 | |
duke@435 | 875 | void LIRGenerator::do_ArrayCopy(Intrinsic* x) { |
duke@435 | 876 | assert(x->number_of_arguments() == 5, "wrong type"); |
never@2347 | 877 | |
never@2347 | 878 | // Make all state_for calls early since they can emit code |
never@2347 | 879 | CodeEmitInfo* info = state_for(x, x->state()); |
never@2347 | 880 | |
duke@435 | 881 | LIRItem src(x->argument_at(0), this); |
duke@435 | 882 | LIRItem src_pos(x->argument_at(1), this); |
duke@435 | 883 | LIRItem dst(x->argument_at(2), this); |
duke@435 | 884 | LIRItem dst_pos(x->argument_at(3), this); |
duke@435 | 885 | LIRItem length(x->argument_at(4), this); |
duke@435 | 886 | |
duke@435 | 887 | // operands for arraycopy must use fixed registers, otherwise |
duke@435 | 888 | // LinearScan will fail allocation (because arraycopy always needs a |
duke@435 | 889 | // call) |
never@739 | 890 | |
never@739 | 891 | #ifndef _LP64 |
duke@435 | 892 | src.load_item_force (FrameMap::rcx_oop_opr); |
duke@435 | 893 | src_pos.load_item_force (FrameMap::rdx_opr); |
duke@435 | 894 | dst.load_item_force (FrameMap::rax_oop_opr); |
duke@435 | 895 | dst_pos.load_item_force (FrameMap::rbx_opr); |
duke@435 | 896 | length.load_item_force (FrameMap::rdi_opr); |
duke@435 | 897 | LIR_Opr tmp = (FrameMap::rsi_opr); |
never@739 | 898 | #else |
never@739 | 899 | |
never@739 | 900 | // The java calling convention will give us enough registers |
never@739 | 901 | // so that on the stub side the args will be perfect already. |
never@739 | 902 | // On the other slow/special case side we call C and the arg |
never@739 | 903 | // positions are not similar enough to pick one as the best. |
never@739 | 904 | // Also because the java calling convention is a "shifted" version |
never@739 | 905 | // of the C convention we can process the java args trivially into C |
never@739 | 906 | // args without worry of overwriting during the xfer |
never@739 | 907 | |
never@739 | 908 | src.load_item_force (FrameMap::as_oop_opr(j_rarg0)); |
never@739 | 909 | src_pos.load_item_force (FrameMap::as_opr(j_rarg1)); |
never@739 | 910 | dst.load_item_force (FrameMap::as_oop_opr(j_rarg2)); |
never@739 | 911 | dst_pos.load_item_force (FrameMap::as_opr(j_rarg3)); |
never@739 | 912 | length.load_item_force (FrameMap::as_opr(j_rarg4)); |
never@739 | 913 | |
never@739 | 914 | LIR_Opr tmp = FrameMap::as_opr(j_rarg5); |
never@739 | 915 | #endif // LP64 |
never@739 | 916 | |
duke@435 | 917 | set_no_result(x); |
duke@435 | 918 | |
duke@435 | 919 | int flags; |
duke@435 | 920 | ciArrayKlass* expected_type; |
duke@435 | 921 | arraycopy_helper(x, &flags, &expected_type); |
duke@435 | 922 | |
duke@435 | 923 | __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint |
duke@435 | 924 | } |
duke@435 | 925 | |
duke@435 | 926 | |
duke@435 | 927 | // _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f |
duke@435 | 928 | // _i2b, _i2c, _i2s |
duke@435 | 929 | LIR_Opr fixed_register_for(BasicType type) { |
duke@435 | 930 | switch (type) { |
duke@435 | 931 | case T_FLOAT: return FrameMap::fpu0_float_opr; |
duke@435 | 932 | case T_DOUBLE: return FrameMap::fpu0_double_opr; |
duke@435 | 933 | case T_INT: return FrameMap::rax_opr; |
never@739 | 934 | case T_LONG: return FrameMap::long0_opr; |
duke@435 | 935 | default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr; |
duke@435 | 936 | } |
duke@435 | 937 | } |
duke@435 | 938 | |
duke@435 | 939 | void LIRGenerator::do_Convert(Convert* x) { |
duke@435 | 940 | // flags that vary for the different operations and different SSE-settings |
duke@435 | 941 | bool fixed_input, fixed_result, round_result, needs_stub; |
duke@435 | 942 | |
duke@435 | 943 | switch (x->op()) { |
duke@435 | 944 | case Bytecodes::_i2l: // fall through |
duke@435 | 945 | case Bytecodes::_l2i: // fall through |
duke@435 | 946 | case Bytecodes::_i2b: // fall through |
duke@435 | 947 | case Bytecodes::_i2c: // fall through |
duke@435 | 948 | case Bytecodes::_i2s: fixed_input = false; fixed_result = false; round_result = false; needs_stub = false; break; |
duke@435 | 949 | |
duke@435 | 950 | case Bytecodes::_f2d: fixed_input = UseSSE == 1; fixed_result = false; round_result = false; needs_stub = false; break; |
duke@435 | 951 | case Bytecodes::_d2f: fixed_input = false; fixed_result = UseSSE == 1; round_result = UseSSE < 1; needs_stub = false; break; |
duke@435 | 952 | case Bytecodes::_i2f: fixed_input = false; fixed_result = false; round_result = UseSSE < 1; needs_stub = false; break; |
duke@435 | 953 | case Bytecodes::_i2d: fixed_input = false; fixed_result = false; round_result = false; needs_stub = false; break; |
duke@435 | 954 | case Bytecodes::_f2i: fixed_input = false; fixed_result = false; round_result = false; needs_stub = true; break; |
duke@435 | 955 | case Bytecodes::_d2i: fixed_input = false; fixed_result = false; round_result = false; needs_stub = true; break; |
duke@435 | 956 | case Bytecodes::_l2f: fixed_input = false; fixed_result = UseSSE >= 1; round_result = UseSSE < 1; needs_stub = false; break; |
duke@435 | 957 | case Bytecodes::_l2d: fixed_input = false; fixed_result = UseSSE >= 2; round_result = UseSSE < 2; needs_stub = false; break; |
duke@435 | 958 | case Bytecodes::_f2l: fixed_input = true; fixed_result = true; round_result = false; needs_stub = false; break; |
duke@435 | 959 | case Bytecodes::_d2l: fixed_input = true; fixed_result = true; round_result = false; needs_stub = false; break; |
duke@435 | 960 | default: ShouldNotReachHere(); |
duke@435 | 961 | } |
duke@435 | 962 | |
duke@435 | 963 | LIRItem value(x->value(), this); |
duke@435 | 964 | value.load_item(); |
duke@435 | 965 | LIR_Opr input = value.result(); |
duke@435 | 966 | LIR_Opr result = rlock(x); |
duke@435 | 967 | |
duke@435 | 968 | // arguments of lir_convert |
duke@435 | 969 | LIR_Opr conv_input = input; |
duke@435 | 970 | LIR_Opr conv_result = result; |
duke@435 | 971 | ConversionStub* stub = NULL; |
duke@435 | 972 | |
duke@435 | 973 | if (fixed_input) { |
duke@435 | 974 | conv_input = fixed_register_for(input->type()); |
duke@435 | 975 | __ move(input, conv_input); |
duke@435 | 976 | } |
duke@435 | 977 | |
duke@435 | 978 | assert(fixed_result == false || round_result == false, "cannot set both"); |
duke@435 | 979 | if (fixed_result) { |
duke@435 | 980 | conv_result = fixed_register_for(result->type()); |
duke@435 | 981 | } else if (round_result) { |
duke@435 | 982 | result = new_register(result->type()); |
duke@435 | 983 | set_vreg_flag(result, must_start_in_memory); |
duke@435 | 984 | } |
duke@435 | 985 | |
duke@435 | 986 | if (needs_stub) { |
duke@435 | 987 | stub = new ConversionStub(x->op(), conv_input, conv_result); |
duke@435 | 988 | } |
duke@435 | 989 | |
duke@435 | 990 | __ convert(x->op(), conv_input, conv_result, stub); |
duke@435 | 991 | |
duke@435 | 992 | if (result != conv_result) { |
duke@435 | 993 | __ move(conv_result, result); |
duke@435 | 994 | } |
duke@435 | 995 | |
duke@435 | 996 | assert(result->is_virtual(), "result must be virtual register"); |
duke@435 | 997 | set_result(x, result); |
duke@435 | 998 | } |
duke@435 | 999 | |
duke@435 | 1000 | |
duke@435 | 1001 | void LIRGenerator::do_NewInstance(NewInstance* x) { |
roland@2174 | 1002 | #ifndef PRODUCT |
duke@435 | 1003 | if (PrintNotLoaded && !x->klass()->is_loaded()) { |
roland@2174 | 1004 | tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci()); |
duke@435 | 1005 | } |
roland@2174 | 1006 | #endif |
duke@435 | 1007 | CodeEmitInfo* info = state_for(x, x->state()); |
duke@435 | 1008 | LIR_Opr reg = result_register_for(x->type()); |
duke@435 | 1009 | LIR_Opr klass_reg = new_register(objectType); |
duke@435 | 1010 | new_instance(reg, x->klass(), |
duke@435 | 1011 | FrameMap::rcx_oop_opr, |
duke@435 | 1012 | FrameMap::rdi_oop_opr, |
duke@435 | 1013 | FrameMap::rsi_oop_opr, |
duke@435 | 1014 | LIR_OprFact::illegalOpr, |
duke@435 | 1015 | FrameMap::rdx_oop_opr, info); |
duke@435 | 1016 | LIR_Opr result = rlock_result(x); |
duke@435 | 1017 | __ move(reg, result); |
duke@435 | 1018 | } |
duke@435 | 1019 | |
duke@435 | 1020 | |
duke@435 | 1021 | void LIRGenerator::do_NewTypeArray(NewTypeArray* x) { |
duke@435 | 1022 | CodeEmitInfo* info = state_for(x, x->state()); |
duke@435 | 1023 | |
duke@435 | 1024 | LIRItem length(x->length(), this); |
duke@435 | 1025 | length.load_item_force(FrameMap::rbx_opr); |
duke@435 | 1026 | |
duke@435 | 1027 | LIR_Opr reg = result_register_for(x->type()); |
duke@435 | 1028 | LIR_Opr tmp1 = FrameMap::rcx_oop_opr; |
duke@435 | 1029 | LIR_Opr tmp2 = FrameMap::rsi_oop_opr; |
duke@435 | 1030 | LIR_Opr tmp3 = FrameMap::rdi_oop_opr; |
duke@435 | 1031 | LIR_Opr tmp4 = reg; |
duke@435 | 1032 | LIR_Opr klass_reg = FrameMap::rdx_oop_opr; |
duke@435 | 1033 | LIR_Opr len = length.result(); |
duke@435 | 1034 | BasicType elem_type = x->elt_type(); |
duke@435 | 1035 | |
jrose@1424 | 1036 | __ oop2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg); |
duke@435 | 1037 | |
duke@435 | 1038 | CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info); |
duke@435 | 1039 | __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path); |
duke@435 | 1040 | |
duke@435 | 1041 | LIR_Opr result = rlock_result(x); |
duke@435 | 1042 | __ move(reg, result); |
duke@435 | 1043 | } |
duke@435 | 1044 | |
duke@435 | 1045 | |
duke@435 | 1046 | void LIRGenerator::do_NewObjectArray(NewObjectArray* x) { |
duke@435 | 1047 | LIRItem length(x->length(), this); |
duke@435 | 1048 | // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction |
duke@435 | 1049 | // and therefore provide the state before the parameters have been consumed |
duke@435 | 1050 | CodeEmitInfo* patching_info = NULL; |
duke@435 | 1051 | if (!x->klass()->is_loaded() || PatchALot) { |
duke@435 | 1052 | patching_info = state_for(x, x->state_before()); |
duke@435 | 1053 | } |
duke@435 | 1054 | |
duke@435 | 1055 | CodeEmitInfo* info = state_for(x, x->state()); |
duke@435 | 1056 | |
duke@435 | 1057 | const LIR_Opr reg = result_register_for(x->type()); |
duke@435 | 1058 | LIR_Opr tmp1 = FrameMap::rcx_oop_opr; |
duke@435 | 1059 | LIR_Opr tmp2 = FrameMap::rsi_oop_opr; |
duke@435 | 1060 | LIR_Opr tmp3 = FrameMap::rdi_oop_opr; |
duke@435 | 1061 | LIR_Opr tmp4 = reg; |
duke@435 | 1062 | LIR_Opr klass_reg = FrameMap::rdx_oop_opr; |
duke@435 | 1063 | |
duke@435 | 1064 | length.load_item_force(FrameMap::rbx_opr); |
duke@435 | 1065 | LIR_Opr len = length.result(); |
duke@435 | 1066 | |
duke@435 | 1067 | CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info); |
duke@435 | 1068 | ciObject* obj = (ciObject*) ciObjArrayKlass::make(x->klass()); |
duke@435 | 1069 | if (obj == ciEnv::unloaded_ciobjarrayklass()) { |
duke@435 | 1070 | BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error"); |
duke@435 | 1071 | } |
duke@435 | 1072 | jobject2reg_with_patching(klass_reg, obj, patching_info); |
duke@435 | 1073 | __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path); |
duke@435 | 1074 | |
duke@435 | 1075 | LIR_Opr result = rlock_result(x); |
duke@435 | 1076 | __ move(reg, result); |
duke@435 | 1077 | } |
duke@435 | 1078 | |
duke@435 | 1079 | |
duke@435 | 1080 | void LIRGenerator::do_NewMultiArray(NewMultiArray* x) { |
duke@435 | 1081 | Values* dims = x->dims(); |
duke@435 | 1082 | int i = dims->length(); |
duke@435 | 1083 | LIRItemList* items = new LIRItemList(dims->length(), NULL); |
duke@435 | 1084 | while (i-- > 0) { |
duke@435 | 1085 | LIRItem* size = new LIRItem(dims->at(i), this); |
duke@435 | 1086 | items->at_put(i, size); |
duke@435 | 1087 | } |
duke@435 | 1088 | |
never@1368 | 1089 | // Evaluate state_for early since it may emit code. |
duke@435 | 1090 | CodeEmitInfo* patching_info = NULL; |
duke@435 | 1091 | if (!x->klass()->is_loaded() || PatchALot) { |
duke@435 | 1092 | patching_info = state_for(x, x->state_before()); |
duke@435 | 1093 | |
duke@435 | 1094 | // cannot re-use same xhandlers for multiple CodeEmitInfos, so |
never@1368 | 1095 | // clone all handlers. This is handled transparently in other |
never@1368 | 1096 | // places by the CodeEmitInfo cloning logic but is handled |
never@1368 | 1097 | // specially here because a stub isn't being used. |
duke@435 | 1098 | x->set_exception_handlers(new XHandlers(x->exception_handlers())); |
duke@435 | 1099 | } |
duke@435 | 1100 | CodeEmitInfo* info = state_for(x, x->state()); |
duke@435 | 1101 | |
duke@435 | 1102 | i = dims->length(); |
duke@435 | 1103 | while (i-- > 0) { |
duke@435 | 1104 | LIRItem* size = items->at(i); |
duke@435 | 1105 | size->load_nonconstant(); |
duke@435 | 1106 | |
duke@435 | 1107 | store_stack_parameter(size->result(), in_ByteSize(i*4)); |
duke@435 | 1108 | } |
duke@435 | 1109 | |
duke@435 | 1110 | LIR_Opr reg = result_register_for(x->type()); |
duke@435 | 1111 | jobject2reg_with_patching(reg, x->klass(), patching_info); |
duke@435 | 1112 | |
duke@435 | 1113 | LIR_Opr rank = FrameMap::rbx_opr; |
duke@435 | 1114 | __ move(LIR_OprFact::intConst(x->rank()), rank); |
duke@435 | 1115 | LIR_Opr varargs = FrameMap::rcx_opr; |
duke@435 | 1116 | __ move(FrameMap::rsp_opr, varargs); |
duke@435 | 1117 | LIR_OprList* args = new LIR_OprList(3); |
duke@435 | 1118 | args->append(reg); |
duke@435 | 1119 | args->append(rank); |
duke@435 | 1120 | args->append(varargs); |
duke@435 | 1121 | __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id), |
duke@435 | 1122 | LIR_OprFact::illegalOpr, |
duke@435 | 1123 | reg, args, info); |
duke@435 | 1124 | |
duke@435 | 1125 | LIR_Opr result = rlock_result(x); |
duke@435 | 1126 | __ move(reg, result); |
duke@435 | 1127 | } |
duke@435 | 1128 | |
duke@435 | 1129 | |
duke@435 | 1130 | void LIRGenerator::do_BlockBegin(BlockBegin* x) { |
duke@435 | 1131 | // nothing to do for now |
duke@435 | 1132 | } |
duke@435 | 1133 | |
duke@435 | 1134 | |
duke@435 | 1135 | void LIRGenerator::do_CheckCast(CheckCast* x) { |
duke@435 | 1136 | LIRItem obj(x->obj(), this); |
duke@435 | 1137 | |
duke@435 | 1138 | CodeEmitInfo* patching_info = NULL; |
duke@435 | 1139 | if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) { |
duke@435 | 1140 | // must do this before locking the destination register as an oop register, |
duke@435 | 1141 | // and before the obj is loaded (the latter is for deoptimization) |
duke@435 | 1142 | patching_info = state_for(x, x->state_before()); |
duke@435 | 1143 | } |
duke@435 | 1144 | obj.load_item(); |
duke@435 | 1145 | |
duke@435 | 1146 | // info for exceptions |
roland@2174 | 1147 | CodeEmitInfo* info_for_exception = state_for(x); |
duke@435 | 1148 | |
duke@435 | 1149 | CodeStub* stub; |
duke@435 | 1150 | if (x->is_incompatible_class_change_check()) { |
duke@435 | 1151 | assert(patching_info == NULL, "can't patch this"); |
duke@435 | 1152 | stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception); |
duke@435 | 1153 | } else { |
duke@435 | 1154 | stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception); |
duke@435 | 1155 | } |
duke@435 | 1156 | LIR_Opr reg = rlock_result(x); |
iveresov@2344 | 1157 | LIR_Opr tmp3 = LIR_OprFact::illegalOpr; |
iveresov@2344 | 1158 | if (!x->klass()->is_loaded() || UseCompressedOops) { |
iveresov@2344 | 1159 | tmp3 = new_register(objectType); |
iveresov@2344 | 1160 | } |
duke@435 | 1161 | __ checkcast(reg, obj.result(), x->klass(), |
iveresov@2344 | 1162 | new_register(objectType), new_register(objectType), tmp3, |
duke@435 | 1163 | x->direct_compare(), info_for_exception, patching_info, stub, |
duke@435 | 1164 | x->profiled_method(), x->profiled_bci()); |
duke@435 | 1165 | } |
duke@435 | 1166 | |
duke@435 | 1167 | |
duke@435 | 1168 | void LIRGenerator::do_InstanceOf(InstanceOf* x) { |
duke@435 | 1169 | LIRItem obj(x->obj(), this); |
duke@435 | 1170 | |
duke@435 | 1171 | // result and test object may not be in same register |
duke@435 | 1172 | LIR_Opr reg = rlock_result(x); |
duke@435 | 1173 | CodeEmitInfo* patching_info = NULL; |
duke@435 | 1174 | if ((!x->klass()->is_loaded() || PatchALot)) { |
duke@435 | 1175 | // must do this before locking the destination register as an oop register |
duke@435 | 1176 | patching_info = state_for(x, x->state_before()); |
duke@435 | 1177 | } |
duke@435 | 1178 | obj.load_item(); |
iveresov@2344 | 1179 | LIR_Opr tmp3 = LIR_OprFact::illegalOpr; |
iveresov@2344 | 1180 | if (!x->klass()->is_loaded() || UseCompressedOops) { |
iveresov@2344 | 1181 | tmp3 = new_register(objectType); |
iveresov@2344 | 1182 | } |
duke@435 | 1183 | __ instanceof(reg, obj.result(), x->klass(), |
iveresov@2344 | 1184 | new_register(objectType), new_register(objectType), tmp3, |
iveresov@2146 | 1185 | x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci()); |
duke@435 | 1186 | } |
duke@435 | 1187 | |
duke@435 | 1188 | |
duke@435 | 1189 | void LIRGenerator::do_If(If* x) { |
duke@435 | 1190 | assert(x->number_of_sux() == 2, "inconsistency"); |
duke@435 | 1191 | ValueTag tag = x->x()->type()->tag(); |
duke@435 | 1192 | bool is_safepoint = x->is_safepoint(); |
duke@435 | 1193 | |
duke@435 | 1194 | If::Condition cond = x->cond(); |
duke@435 | 1195 | |
duke@435 | 1196 | LIRItem xitem(x->x(), this); |
duke@435 | 1197 | LIRItem yitem(x->y(), this); |
duke@435 | 1198 | LIRItem* xin = &xitem; |
duke@435 | 1199 | LIRItem* yin = &yitem; |
duke@435 | 1200 | |
duke@435 | 1201 | if (tag == longTag) { |
duke@435 | 1202 | // for longs, only conditions "eql", "neq", "lss", "geq" are valid; |
duke@435 | 1203 | // mirror for other conditions |
duke@435 | 1204 | if (cond == If::gtr || cond == If::leq) { |
duke@435 | 1205 | cond = Instruction::mirror(cond); |
duke@435 | 1206 | xin = &yitem; |
duke@435 | 1207 | yin = &xitem; |
duke@435 | 1208 | } |
duke@435 | 1209 | xin->set_destroys_register(); |
duke@435 | 1210 | } |
duke@435 | 1211 | xin->load_item(); |
duke@435 | 1212 | if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) { |
duke@435 | 1213 | // inline long zero |
duke@435 | 1214 | yin->dont_load_item(); |
duke@435 | 1215 | } else if (tag == longTag || tag == floatTag || tag == doubleTag) { |
duke@435 | 1216 | // longs cannot handle constants at right side |
duke@435 | 1217 | yin->load_item(); |
duke@435 | 1218 | } else { |
duke@435 | 1219 | yin->dont_load_item(); |
duke@435 | 1220 | } |
duke@435 | 1221 | |
duke@435 | 1222 | // add safepoint before generating condition code so it can be recomputed |
duke@435 | 1223 | if (x->is_safepoint()) { |
duke@435 | 1224 | // increment backedge counter if needed |
iveresov@2138 | 1225 | increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci()); |
duke@435 | 1226 | __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before())); |
duke@435 | 1227 | } |
duke@435 | 1228 | set_no_result(x); |
duke@435 | 1229 | |
duke@435 | 1230 | LIR_Opr left = xin->result(); |
duke@435 | 1231 | LIR_Opr right = yin->result(); |
duke@435 | 1232 | __ cmp(lir_cond(cond), left, right); |
iveresov@2138 | 1233 | // Generate branch profiling. Profiling code doesn't kill flags. |
duke@435 | 1234 | profile_branch(x, cond); |
duke@435 | 1235 | move_to_phi(x->state()); |
duke@435 | 1236 | if (x->x()->type()->is_float_kind()) { |
duke@435 | 1237 | __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux()); |
duke@435 | 1238 | } else { |
duke@435 | 1239 | __ branch(lir_cond(cond), right->type(), x->tsux()); |
duke@435 | 1240 | } |
duke@435 | 1241 | assert(x->default_sux() == x->fsux(), "wrong destination above"); |
duke@435 | 1242 | __ jump(x->default_sux()); |
duke@435 | 1243 | } |
duke@435 | 1244 | |
duke@435 | 1245 | |
duke@435 | 1246 | LIR_Opr LIRGenerator::getThreadPointer() { |
never@739 | 1247 | #ifdef _LP64 |
never@739 | 1248 | return FrameMap::as_pointer_opr(r15_thread); |
never@739 | 1249 | #else |
duke@435 | 1250 | LIR_Opr result = new_register(T_INT); |
duke@435 | 1251 | __ get_thread(result); |
duke@435 | 1252 | return result; |
never@739 | 1253 | #endif // |
duke@435 | 1254 | } |
duke@435 | 1255 | |
duke@435 | 1256 | void LIRGenerator::trace_block_entry(BlockBegin* block) { |
duke@435 | 1257 | store_stack_parameter(LIR_OprFact::intConst(block->block_id()), in_ByteSize(0)); |
duke@435 | 1258 | LIR_OprList* args = new LIR_OprList(); |
duke@435 | 1259 | address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry); |
duke@435 | 1260 | __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args); |
duke@435 | 1261 | } |
duke@435 | 1262 | |
duke@435 | 1263 | |
duke@435 | 1264 | void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address, |
duke@435 | 1265 | CodeEmitInfo* info) { |
duke@435 | 1266 | if (address->type() == T_LONG) { |
duke@435 | 1267 | address = new LIR_Address(address->base(), |
duke@435 | 1268 | address->index(), address->scale(), |
duke@435 | 1269 | address->disp(), T_DOUBLE); |
duke@435 | 1270 | // Transfer the value atomically by using FP moves. This means |
duke@435 | 1271 | // the value has to be moved between CPU and FPU registers. It |
duke@435 | 1272 | // always has to be moved through spill slot since there's no |
duke@435 | 1273 | // quick way to pack the value into an SSE register. |
duke@435 | 1274 | LIR_Opr temp_double = new_register(T_DOUBLE); |
duke@435 | 1275 | LIR_Opr spill = new_register(T_LONG); |
duke@435 | 1276 | set_vreg_flag(spill, must_start_in_memory); |
duke@435 | 1277 | __ move(value, spill); |
duke@435 | 1278 | __ volatile_move(spill, temp_double, T_LONG); |
duke@435 | 1279 | __ volatile_move(temp_double, LIR_OprFact::address(address), T_LONG, info); |
duke@435 | 1280 | } else { |
duke@435 | 1281 | __ store(value, address, info); |
duke@435 | 1282 | } |
duke@435 | 1283 | } |
duke@435 | 1284 | |
duke@435 | 1285 | |
duke@435 | 1286 | |
duke@435 | 1287 | void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result, |
duke@435 | 1288 | CodeEmitInfo* info) { |
duke@435 | 1289 | if (address->type() == T_LONG) { |
duke@435 | 1290 | address = new LIR_Address(address->base(), |
duke@435 | 1291 | address->index(), address->scale(), |
duke@435 | 1292 | address->disp(), T_DOUBLE); |
duke@435 | 1293 | // Transfer the value atomically by using FP moves. This means |
duke@435 | 1294 | // the value has to be moved between CPU and FPU registers. In |
duke@435 | 1295 | // SSE0 and SSE1 mode it has to be moved through spill slot but in |
duke@435 | 1296 | // SSE2+ mode it can be moved directly. |
duke@435 | 1297 | LIR_Opr temp_double = new_register(T_DOUBLE); |
duke@435 | 1298 | __ volatile_move(LIR_OprFact::address(address), temp_double, T_LONG, info); |
duke@435 | 1299 | __ volatile_move(temp_double, result, T_LONG); |
duke@435 | 1300 | if (UseSSE < 2) { |
duke@435 | 1301 | // no spill slot needed in SSE2 mode because xmm->cpu register move is possible |
duke@435 | 1302 | set_vreg_flag(result, must_start_in_memory); |
duke@435 | 1303 | } |
duke@435 | 1304 | } else { |
duke@435 | 1305 | __ load(address, result, info); |
duke@435 | 1306 | } |
duke@435 | 1307 | } |
duke@435 | 1308 | |
duke@435 | 1309 | void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset, |
duke@435 | 1310 | BasicType type, bool is_volatile) { |
duke@435 | 1311 | if (is_volatile && type == T_LONG) { |
duke@435 | 1312 | LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE); |
duke@435 | 1313 | LIR_Opr tmp = new_register(T_DOUBLE); |
duke@435 | 1314 | __ load(addr, tmp); |
duke@435 | 1315 | LIR_Opr spill = new_register(T_LONG); |
duke@435 | 1316 | set_vreg_flag(spill, must_start_in_memory); |
duke@435 | 1317 | __ move(tmp, spill); |
duke@435 | 1318 | __ move(spill, dst); |
duke@435 | 1319 | } else { |
duke@435 | 1320 | LIR_Address* addr = new LIR_Address(src, offset, type); |
duke@435 | 1321 | __ load(addr, dst); |
duke@435 | 1322 | } |
duke@435 | 1323 | } |
duke@435 | 1324 | |
duke@435 | 1325 | |
duke@435 | 1326 | void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data, |
duke@435 | 1327 | BasicType type, bool is_volatile) { |
duke@435 | 1328 | if (is_volatile && type == T_LONG) { |
duke@435 | 1329 | LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE); |
duke@435 | 1330 | LIR_Opr tmp = new_register(T_DOUBLE); |
duke@435 | 1331 | LIR_Opr spill = new_register(T_DOUBLE); |
duke@435 | 1332 | set_vreg_flag(spill, must_start_in_memory); |
duke@435 | 1333 | __ move(data, spill); |
duke@435 | 1334 | __ move(spill, tmp); |
duke@435 | 1335 | __ move(tmp, addr); |
duke@435 | 1336 | } else { |
duke@435 | 1337 | LIR_Address* addr = new LIR_Address(src, offset, type); |
duke@435 | 1338 | bool is_obj = (type == T_ARRAY || type == T_OBJECT); |
duke@435 | 1339 | if (is_obj) { |
ysr@777 | 1340 | // Do the pre-write barrier, if any. |
ysr@777 | 1341 | pre_barrier(LIR_OprFact::address(addr), false, NULL); |
duke@435 | 1342 | __ move(data, addr); |
duke@435 | 1343 | assert(src->is_register(), "must be register"); |
duke@435 | 1344 | // Seems to be a precise address |
duke@435 | 1345 | post_barrier(LIR_OprFact::address(addr), data); |
duke@435 | 1346 | } else { |
duke@435 | 1347 | __ move(data, addr); |
duke@435 | 1348 | } |
duke@435 | 1349 | } |
duke@435 | 1350 | } |