src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp

author:      johnc
date:        Thu, 07 Apr 2011 09:53:20 -0700
changeset:   2781:e1162778c1c8
parent:      2603:1b4e6a5d98e0
child:       2728:13bc79b5c9c8
permissions: -rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking, but is re-attached to the strongly reachable object graph during marking, may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
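The barrier described above amounts to the following minimal C++ sketch; the helper names (load_referent_field, satb_enqueue, current_thread) are illustrative stand-ins, not the actual HotSpot entry points:

  // Sketch of the intrinsified Reference.get() path (hypothetical helpers).
  oop Reference_get(oop reference) {
    oop referent = load_referent_field(reference);   // ordinary field load
    if (referent != NULL) {
      // Log the referent in the thread-local SATB buffer so concurrent
      // marking treats it as live even if it was only weakly reachable
      // when marking began.
      satb_enqueue(current_thread(), referent);
    }
    return referent;
  }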

duke@435 1 /*
iveresov@2432 2 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "c1/c1_Compilation.hpp"
stefank@2314 27 #include "c1/c1_LIRAssembler.hpp"
stefank@2314 28 #include "c1/c1_MacroAssembler.hpp"
stefank@2314 29 #include "c1/c1_Runtime1.hpp"
stefank@2314 30 #include "c1/c1_ValueStack.hpp"
stefank@2314 31 #include "ci/ciArrayKlass.hpp"
stefank@2314 32 #include "ci/ciInstance.hpp"
stefank@2314 33 #include "gc_interface/collectedHeap.hpp"
stefank@2314 34 #include "memory/barrierSet.hpp"
stefank@2314 35 #include "memory/cardTableModRefBS.hpp"
stefank@2314 36 #include "nativeInst_sparc.hpp"
stefank@2314 37 #include "oops/objArrayKlass.hpp"
stefank@2314 38 #include "runtime/sharedRuntime.hpp"
duke@435 39
duke@435 40 #define __ _masm->
duke@435 41
duke@435 42
duke@435 43 //------------------------------------------------------------
duke@435 44
duke@435 45
duke@435 46 bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
duke@435 47 if (opr->is_constant()) {
duke@435 48 LIR_Const* constant = opr->as_constant_ptr();
duke@435 49 switch (constant->type()) {
duke@435 50 case T_INT: {
duke@435 51 jint value = constant->as_jint();
duke@435 52 return Assembler::is_simm13(value);
duke@435 53 }
duke@435 54
duke@435 55 default:
duke@435 56 return false;
duke@435 57 }
duke@435 58 }
duke@435 59 return false;
duke@435 60 }
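// Note on the check above: simm13 is SPARC's 13-bit signed immediate
// field, covering [-4096, 4095]; constants in that range fit directly
// into an arithmetic instruction and need no separate register load.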
duke@435 61
duke@435 62
duke@435 63 bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
duke@435 64 switch (op->code()) {
duke@435 65 case lir_null_check:
duke@435 66 return true;
duke@435 67
duke@435 68
duke@435 69 case lir_add:
duke@435 70 case lir_ushr:
duke@435 71 case lir_shr:
duke@435 72 case lir_shl:
duke@435 73 // integer shifts and adds are always one instruction
duke@435 74 return op->result_opr()->is_single_cpu();
duke@435 75
duke@435 76
duke@435 77 case lir_move: {
duke@435 78 LIR_Op1* op1 = op->as_Op1();
duke@435 79 LIR_Opr src = op1->in_opr();
duke@435 80 LIR_Opr dst = op1->result_opr();
duke@435 81
duke@435 82 if (src == dst) {
duke@435 83 NEEDS_CLEANUP;
duke@435 84 // this works around a problem where moves with the same src and dst
duke@435 85 // end up in the delay slot and then the assembler swallows the mov
duke@435 86 // since it has no effect and then it complains because the delay slot
duke@435 87 // is empty. returning false stops the optimizer from putting this in
duke@435 88 // the delay slot
duke@435 89 return false;
duke@435 90 }
duke@435 91
duke@435 92 // don't put moves involving oops into the delay slot since the VerifyOops code
duke@435 93 // will make it much larger than a single instruction.
duke@435 94 if (VerifyOops) {
duke@435 95 return false;
duke@435 96 }
duke@435 97
duke@435 98 if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
duke@435 99 ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
duke@435 100 return false;
duke@435 101 }
duke@435 102
iveresov@2344 103 if (UseCompressedOops) {
iveresov@2344 104 if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false;
iveresov@2344 105 if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
iveresov@2344 106 }
iveresov@2344 107
duke@435 108 if (dst->is_register()) {
duke@435 109 if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
duke@435 110 return !PatchALot;
duke@435 111 } else if (src->is_single_stack()) {
duke@435 112 return true;
duke@435 113 }
duke@435 114 }
duke@435 115
duke@435 116 if (src->is_register()) {
duke@435 117 if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
duke@435 118 return !PatchALot;
duke@435 119 } else if (dst->is_single_stack()) {
duke@435 120 return true;
duke@435 121 }
duke@435 122 }
duke@435 123
duke@435 124 if (dst->is_register() &&
duke@435 125 ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) ||
duke@435 126 (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) {
duke@435 127 return true;
duke@435 128 }
duke@435 129
duke@435 130 return false;
duke@435 131 }
duke@435 132
duke@435 133 default:
duke@435 134 return false;
duke@435 135 }
duke@435 136 ShouldNotReachHere();
duke@435 137 }
duke@435 138
duke@435 139
duke@435 140 LIR_Opr LIR_Assembler::receiverOpr() {
duke@435 141 return FrameMap::O0_oop_opr;
duke@435 142 }
duke@435 143
duke@435 144
duke@435 145 LIR_Opr LIR_Assembler::incomingReceiverOpr() {
duke@435 146 return FrameMap::I0_oop_opr;
duke@435 147 }
duke@435 148
duke@435 149
duke@435 150 LIR_Opr LIR_Assembler::osrBufferPointer() {
duke@435 151 return FrameMap::I0_opr;
duke@435 152 }
duke@435 153
duke@435 154
duke@435 155 int LIR_Assembler::initial_frame_size_in_bytes() {
duke@435 156 return in_bytes(frame_map()->framesize_in_bytes());
duke@435 157 }
duke@435 158
duke@435 159
duke@435 160 // inline cache check: the inline cached class is in G5_inline_cache_reg(G5);
duke@435 161 // we fetch the class of the receiver (O0) and compare it with the cached class.
duke@435 162 // If they do not match we jump to slow case.
duke@435 163 int LIR_Assembler::check_icache() {
duke@435 164 int offset = __ offset();
duke@435 165 __ inline_cache_check(O0, G5_inline_cache_reg);
duke@435 166 return offset;
duke@435 167 }
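// Conceptually (illustrative pseudocode only, not the emitted code):
//   if (load_klass(O0) != G5_inline_cache_reg) goto IC-miss handler;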
duke@435 168
duke@435 169
duke@435 170 void LIR_Assembler::osr_entry() {
duke@435 171 // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
duke@435 172 //
duke@435 173 // 1. Create a new compiled activation.
duke@435 174 // 2. Initialize local variables in the compiled activation. The expression stack must be empty
duke@435 175 // at the osr_bci; it is not initialized.
duke@435 176 // 3. Jump to the continuation address in compiled code to resume execution.
duke@435 177
duke@435 178 // OSR entry point
duke@435 179 offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
duke@435 180 BlockBegin* osr_entry = compilation()->hir()->osr_entry();
duke@435 181 ValueStack* entry_state = osr_entry->end()->state();
duke@435 182 int number_of_locks = entry_state->locks_size();
duke@435 183
duke@435 184 // Create a frame for the compiled activation.
duke@435 185 __ build_frame(initial_frame_size_in_bytes());
duke@435 186
duke@435 187 // OSR buffer is
duke@435 188 //
duke@435 189 // locals[nlocals-1..0]
duke@435 190 // monitors[number_of_locks-1..0]
duke@435 191 //
duke@435 192 // locals is a direct copy of the interpreter frame, so the first slot
duke@435 193 // in the local array is the last local from the interpreter
duke@435 194 // and the last slot is local[0] (the receiver) from the interpreter
duke@435 195 //
duke@435 196 // Similarly with locks: the first lock slot in the osr buffer is the nth lock
duke@435 197 // from the interpreter frame, and the nth lock slot in the osr buffer is the
duke@435 198 // 0th lock in the interpreter frame (the method lock, if a synchronized method)
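// Illustrative only: the layout above written as a hypothetical struct
// (HotSpot declares no such type; the slots are addressed by byte
// offsets, as in the code below):
//
//   struct OSRBuffer {
//     intptr_t locals[max_locals];          // locals[0] = interpreter local[max_locals-1]
//     struct { intptr_t lock; oop obj; }    // 2-word BasicObjectLock images
//       monitors[number_of_locks];          // monitors[0] = interpreter's last lock
//   };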
duke@435 199
duke@435 200 // Initialize monitors in the compiled activation.
duke@435 201 // I0: pointer to osr buffer
duke@435 202 //
duke@435 203 // All other registers are dead at this point and the locals will be
duke@435 204 // copied into place by code emitted in the IR.
duke@435 205
duke@435 206 Register OSR_buf = osrBufferPointer()->as_register();
duke@435 207 { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
duke@435 208 int monitor_offset = BytesPerWord * method()->max_locals() +
roland@1495 209 (2 * BytesPerWord) * (number_of_locks - 1);
roland@1495 210 // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
roland@1495 211 // the OSR buffer using 2 word entries: first the lock and then
roland@1495 212 // the oop.
duke@435 213 for (int i = 0; i < number_of_locks; i++) {
roland@1495 214 int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
duke@435 215 #ifdef ASSERT
duke@435 216 // verify the interpreter's monitor has a non-null object
duke@435 217 {
duke@435 218 Label L;
roland@1495 219 __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
duke@435 220 __ cmp(G0, O7);
duke@435 221 __ br(Assembler::notEqual, false, Assembler::pt, L);
duke@435 222 __ delayed()->nop();
duke@435 223 __ stop("locked object is NULL");
duke@435 224 __ bind(L);
duke@435 225 }
duke@435 226 #endif // ASSERT
duke@435 227 // Copy the lock field into the compiled activation.
roland@1495 228 __ ld_ptr(OSR_buf, slot_offset + 0, O7);
duke@435 229 __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
roland@1495 230 __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
duke@435 231 __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
duke@435 232 }
duke@435 233 }
duke@435 234 }
duke@435 235
duke@435 236
duke@435 237 // Optimized Library calls
duke@435 238 // This is the fast version of java.lang.String.compare; it has no
duke@435 239 // OSR entry and therefore we generate a slow version for OSRs
duke@435 240 void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) {
duke@435 241 Register str0 = left->as_register();
duke@435 242 Register str1 = right->as_register();
duke@435 243
duke@435 244 Label Ldone;
duke@435 245
duke@435 246 Register result = dst->as_register();
duke@435 247 {
duke@435 248 // Get a pointer to the first character of string0 in tmp0 and get string0.count in str0
duke@435 249 // Get a pointer to the first character of string1 in tmp1 and get string1.count in str1
duke@435 250 // Also, get string0.count - string1.count in O7 and get the condition code set
duke@435 251 // Note: some instructions have been hoisted for better instruction scheduling
duke@435 252
duke@435 253 Register tmp0 = L0;
duke@435 254 Register tmp1 = L1;
duke@435 255 Register tmp2 = L2;
duke@435 256
duke@435 257 int value_offset = java_lang_String:: value_offset_in_bytes(); // char array
duke@435 258 int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
duke@435 259 int count_offset = java_lang_String:: count_offset_in_bytes();
duke@435 260
iveresov@2344 261 __ load_heap_oop(str0, value_offset, tmp0);
twisti@1162 262 __ ld(str0, offset_offset, tmp2);
duke@435 263 __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
twisti@1162 264 __ ld(str0, count_offset, str0);
duke@435 265 __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
duke@435 266
duke@435 267 // str1 may be null
duke@435 268 add_debug_info_for_null_check_here(info);
duke@435 269
iveresov@2344 270 __ load_heap_oop(str1, value_offset, tmp1);
duke@435 271 __ add(tmp0, tmp2, tmp0);
duke@435 272
twisti@1162 273 __ ld(str1, offset_offset, tmp2);
duke@435 274 __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
twisti@1162 275 __ ld(str1, count_offset, str1);
duke@435 276 __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
duke@435 277 __ subcc(str0, str1, O7);
duke@435 278 __ add(tmp1, tmp2, tmp1);
duke@435 279 }
duke@435 280
duke@435 281 {
duke@435 282 // Compute the minimum of the string lengths, scale it and store it in limit
duke@435 283 Register count0 = I0;
duke@435 284 Register count1 = I1;
duke@435 285 Register limit = L3;
duke@435 286
duke@435 287 Label Lskip;
duke@435 288 __ sll(count0, exact_log2(sizeof(jchar)), limit); // string0 is shorter
duke@435 289 __ br(Assembler::greater, true, Assembler::pt, Lskip);
duke@435 290 __ delayed()->sll(count1, exact_log2(sizeof(jchar)), limit); // string1 is shorter
duke@435 291 __ bind(Lskip);
duke@435 292
duke@435 293 // If either string is empty (or both of them) the result is the difference in lengths
duke@435 294 __ cmp(limit, 0);
duke@435 295 __ br(Assembler::equal, true, Assembler::pn, Ldone);
duke@435 296 __ delayed()->mov(O7, result); // result is difference in lengths
duke@435 297 }
duke@435 298
duke@435 299 {
duke@435 300 // Neither string is empty
duke@435 301 Label Lloop;
duke@435 302
duke@435 303 Register base0 = L0;
duke@435 304 Register base1 = L1;
duke@435 305 Register chr0 = I0;
duke@435 306 Register chr1 = I1;
duke@435 307 Register limit = L3;
duke@435 308
duke@435 309 // Shift base0 and base1 to the end of the arrays, negate limit
duke@435 310 __ add(base0, limit, base0);
duke@435 311 __ add(base1, limit, base1);
duke@435 312 __ neg(limit); // limit = -min{string0.count, string1.count}
duke@435 313
duke@435 314 __ lduh(base0, limit, chr0);
duke@435 315 __ bind(Lloop);
duke@435 316 __ lduh(base1, limit, chr1);
duke@435 317 __ subcc(chr0, chr1, chr0);
duke@435 318 __ br(Assembler::notZero, false, Assembler::pn, Ldone);
duke@435 319 assert(chr0 == result, "result must be pre-placed");
duke@435 320 __ delayed()->inccc(limit, sizeof(jchar));
duke@435 321 __ br(Assembler::notZero, true, Assembler::pt, Lloop);
duke@435 322 __ delayed()->lduh(base0, limit, chr0);
duke@435 323 }
duke@435 324
duke@435 325 // If strings are equal up to min length, return the length difference.
duke@435 326 __ mov(O7, result);
duke@435 327
duke@435 328 // Otherwise, return the difference between the first mismatched chars.
duke@435 329 __ bind(Ldone);
duke@435 330 }
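// For reference, the assembly above is equivalent to this sketch over the
// classical value/offset/count String layout (illustrative pseudocode only):
//
//   int compare(String* s0, String* s1) {
//     int min = s0->count < s1->count ? s0->count : s1->count;
//     for (int i = 0; i < min; i++) {
//       int d = s0->value[s0->offset + i] - s1->value[s1->offset + i];
//       if (d != 0) return d;          // first mismatched pair of chars
//     }
//     return s0->count - s1->count;    // equal prefix: length difference
//   }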
duke@435 331
duke@435 332
duke@435 333 // --------------------------------------------------------------------------------------------
duke@435 334
duke@435 335 void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {
duke@435 336 if (!GenerateSynchronizationCode) return;
duke@435 337
duke@435 338 Register obj_reg = obj_opr->as_register();
duke@435 339 Register lock_reg = lock_opr->as_register();
duke@435 340
duke@435 341 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
duke@435 342 Register reg = mon_addr.base();
duke@435 343 int offset = mon_addr.disp();
duke@435 344 // compute pointer to BasicLock
duke@435 345 if (mon_addr.is_simm13()) {
duke@435 346 __ add(reg, offset, lock_reg);
duke@435 347 }
duke@435 348 else {
duke@435 349 __ set(offset, lock_reg);
duke@435 350 __ add(reg, lock_reg, lock_reg);
duke@435 351 }
duke@435 352 // unlock object
duke@435 353 MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no);
duke@435 354 // _slow_case_stubs->append(slow_case);
duke@435 355 // temporary fix: must be created after the exception handler, therefore as a call stub
duke@435 356 _slow_case_stubs->append(slow_case);
duke@435 357 if (UseFastLocking) {
duke@435 358 // try inlined fast unlocking first, revert to slow unlocking if it fails
duke@435 359 // note: lock_reg points to the displaced header since the displaced header offset is 0!
duke@435 360 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
duke@435 361 __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
duke@435 362 } else {
duke@435 363 // always do slow unlocking
duke@435 364 // note: the slow unlocking code could be inlined here, however if we use
duke@435 365 // slow unlocking, speed doesn't matter anyway and this solution is
duke@435 366 // simpler and requires less duplicated code - additionally, the
duke@435 367 // slow unlocking code is the same in either case which simplifies
duke@435 368 // debugging
duke@435 369 __ br(Assembler::always, false, Assembler::pt, *slow_case->entry());
duke@435 370 __ delayed()->nop();
duke@435 371 }
duke@435 372 // done
duke@435 373 __ bind(*slow_case->continuation());
duke@435 374 }
duke@435 375
duke@435 376
twisti@1639 377 int LIR_Assembler::emit_exception_handler() {
duke@435 378 // if the last instruction is a call (typically to do a throw which
duke@435 379 // is coming at the end after block reordering) the return address
duke@435 380 // must still point into the code area in order to avoid assertion
duke@435 381 // failures when searching for the corresponding bci => add a nop
duke@435 382 // (was bug 5/14/1999 - gri)
duke@435 383 __ nop();
duke@435 384
duke@435 385 // generate code for exception handler
duke@435 386 ciMethod* method = compilation()->method();
duke@435 387
duke@435 388 address handler_base = __ start_a_stub(exception_handler_size);
duke@435 389
duke@435 390 if (handler_base == NULL) {
duke@435 391 // not enough space left for the handler
duke@435 392 bailout("exception handler overflow");
twisti@1639 393 return -1;
duke@435 394 }
twisti@1639 395
duke@435 396 int offset = code_offset();
duke@435 397
twisti@2603 398 __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
duke@435 399 __ delayed()->nop();
twisti@2603 400 __ should_not_reach_here();
duke@435 401 assert(code_offset() - offset <= exception_handler_size, "overflow");
duke@435 402 __ end_a_stub();
twisti@1639 403
twisti@1639 404 return offset;
duke@435 405 }
duke@435 406
twisti@1639 407
never@1813 408 // Emit the code to remove the frame from the stack in the exception
never@1813 409 // unwind path.
never@1813 410 int LIR_Assembler::emit_unwind_handler() {
never@1813 411 #ifndef PRODUCT
never@1813 412 if (CommentedAssembly) {
never@1813 413 _masm->block_comment("Unwind handler");
never@1813 414 }
never@1813 415 #endif
never@1813 416
never@1813 417 int offset = code_offset();
never@1813 418
never@1813 419 // Fetch the exception from TLS and clear out exception related thread state
never@1813 420 __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
never@1813 421 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
never@1813 422 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
never@1813 423
never@1813 424 __ bind(_unwind_handler_entry);
never@1813 425 __ verify_not_null_oop(O0);
never@1813 426 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
never@1813 427 __ mov(O0, I0); // Preserve the exception
never@1813 428 }
never@1813 429
never@1813 430 // Perform needed unlocking
never@1813 431 MonitorExitStub* stub = NULL;
never@1813 432 if (method()->is_synchronized()) {
never@1813 433 monitor_address(0, FrameMap::I1_opr);
never@1813 434 stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
never@1813 435 __ unlock_object(I3, I2, I1, *stub->entry());
never@1813 436 __ bind(*stub->continuation());
never@1813 437 }
never@1813 438
never@1813 439 if (compilation()->env()->dtrace_method_probes()) {
never@2185 440 __ mov(G2_thread, O0);
never@2185 441 jobject2reg(method()->constant_encoding(), O1);
never@1813 442 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
never@1813 443 __ delayed()->nop();
never@1813 444 }
never@1813 445
never@1813 446 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
never@1813 447 __ mov(I0, O0); // Restore the exception
never@1813 448 }
never@1813 449
never@1813 450 // dispatch to the unwind logic
never@1813 451 __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
never@1813 452 __ delayed()->nop();
never@1813 453
never@1813 454 // Emit the slow path assembly
never@1813 455 if (stub != NULL) {
never@1813 456 stub->emit_code(this);
never@1813 457 }
never@1813 458
never@1813 459 return offset;
never@1813 460 }
never@1813 461
never@1813 462
twisti@1639 463 int LIR_Assembler::emit_deopt_handler() {
duke@435 464 // if the last instruction is a call (typically to do a throw which
duke@435 465 // is coming at the end after block reordering) the return address
duke@435 466 // must still point into the code area in order to avoid assertion
duke@435 467 // failures when searching for the corresponding bci => add a nop
duke@435 468 // (was bug 5/14/1999 - gri)
duke@435 469 __ nop();
duke@435 470
duke@435 471 // generate code for deopt handler
duke@435 472 ciMethod* method = compilation()->method();
duke@435 473 address handler_base = __ start_a_stub(deopt_handler_size);
duke@435 474 if (handler_base == NULL) {
duke@435 475 // not enough space left for the handler
duke@435 476 bailout("deopt handler overflow");
twisti@1639 477 return -1;
duke@435 478 }
twisti@1639 479
duke@435 480 int offset = code_offset();
twisti@1162 481 AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
twisti@1162 482 __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
duke@435 483 __ delayed()->nop();
duke@435 484 assert(code_offset() - offset <= deopt_handler_size, "overflow");
duke@435 485 debug_only(__ stop("should have gone to the caller");)
duke@435 486 __ end_a_stub();
twisti@1639 487
twisti@1639 488 return offset;
duke@435 489 }
duke@435 490
duke@435 491
duke@435 492 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
duke@435 493 if (o == NULL) {
duke@435 494 __ set(NULL_WORD, reg);
duke@435 495 } else {
duke@435 496 int oop_index = __ oop_recorder()->find_index(o);
duke@435 497 RelocationHolder rspec = oop_Relocation::spec(oop_index);
duke@435 498 __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
duke@435 499 }
duke@435 500 }
duke@435 501
duke@435 502
duke@435 503 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
duke@435 504 // Allocate a new index in oop table to hold the oop once it's been patched
duke@435 505 int oop_index = __ oop_recorder()->allocate_index((jobject)NULL);
duke@435 506 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, oop_index);
duke@435 507
twisti@1162 508 AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
twisti@1162 509 assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
duke@435 510 // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
duke@435 511 // NULL will be dynamically patched later and the patched value may be large. We must
duke@435 512 // therefore generate the sethi/add as placeholders.
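// (On 32-bit, sethi supplies the high 22 bits and the add the low 10,
// so the pair can later be patched to materialize any 32-bit value.)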
twisti@1162 513 __ patchable_set(addrlit, reg);
duke@435 514
duke@435 515 patching_epilog(patch, lir_patch_normal, reg, info);
duke@435 516 }
duke@435 517
duke@435 518
duke@435 519 void LIR_Assembler::emit_op3(LIR_Op3* op) {
duke@435 520 Register Rdividend = op->in_opr1()->as_register();
duke@435 521 Register Rdivisor = noreg;
duke@435 522 Register Rscratch = op->in_opr3()->as_register();
duke@435 523 Register Rresult = op->result_opr()->as_register();
duke@435 524 int divisor = -1;
duke@435 525
duke@435 526 if (op->in_opr2()->is_register()) {
duke@435 527 Rdivisor = op->in_opr2()->as_register();
duke@435 528 } else {
duke@435 529 divisor = op->in_opr2()->as_constant_ptr()->as_jint();
duke@435 530 assert(Assembler::is_simm13(divisor), "can only handle simm13");
duke@435 531 }
duke@435 532
duke@435 533 assert(Rdividend != Rscratch, "");
duke@435 534 assert(Rdivisor != Rscratch, "");
duke@435 535 assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");
duke@435 536
duke@435 537 if (Rdivisor == noreg && is_power_of_2(divisor)) {
duke@435 538 // convert division by a power of two into some shifts and logical operations
duke@435 539 if (op->code() == lir_idiv) {
duke@435 540 if (divisor == 2) {
duke@435 541 __ srl(Rdividend, 31, Rscratch);
duke@435 542 } else {
duke@435 543 __ sra(Rdividend, 31, Rscratch);
duke@435 544 __ and3(Rscratch, divisor - 1, Rscratch);
duke@435 545 }
duke@435 546 __ add(Rdividend, Rscratch, Rscratch);
duke@435 547 __ sra(Rscratch, log2_intptr(divisor), Rresult);
duke@435 548 return;
duke@435 549 } else {
duke@435 550 if (divisor == 2) {
duke@435 551 __ srl(Rdividend, 31, Rscratch);
duke@435 552 } else {
duke@435 553 __ sra(Rdividend, 31, Rscratch);
duke@435 554 __ and3(Rscratch, divisor - 1,Rscratch);
duke@435 555 }
duke@435 556 __ add(Rdividend, Rscratch, Rscratch);
duke@435 557 __ andn(Rscratch, divisor - 1,Rscratch);
duke@435 558 __ sub(Rdividend, Rscratch, Rresult);
duke@435 559 return;
duke@435 560 }
duke@435 561 }
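// The two sequences above implement Java's round-toward-zero semantics
// for d = 2^k: idiv is (x + ((x >> 31) & (d-1))) >> k, and irem is
// x - ((x + ((x >> 31) & (d-1))) & ~(d-1)). The (x >> 31) & (d-1) bias
// is d-1 for negative x and 0 otherwise, which makes the arithmetic
// shift truncate toward zero instead of toward negative infinity.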
duke@435 562
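// General case: 32-bit signed divide. sdivcc divides the 64-bit value
// Y:Rdividend, so Y must first be loaded with the dividend's sign bits.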
duke@435 563 __ sra(Rdividend, 31, Rscratch);
duke@435 564 __ wry(Rscratch);
duke@435 565 if (!VM_Version::v9_instructions_work()) {
duke@435 566 // v9 doesn't require these nops
duke@435 567 __ nop();
duke@435 568 __ nop();
duke@435 569 __ nop();
duke@435 570 __ nop();
duke@435 571 }
duke@435 572
duke@435 573 add_debug_info_for_div0_here(op->info());
duke@435 574
duke@435 575 if (Rdivisor != noreg) {
duke@435 576 __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
duke@435 577 } else {
duke@435 578 assert(Assembler::is_simm13(divisor), "can only handle simm13");
duke@435 579 __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
duke@435 580 }
duke@435 581
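// min_int / -1 overflows; Java defines the result as min_int itself
// (0x80000000), installed here via the annulled delay-slot sethi.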
duke@435 582 Label skip;
duke@435 583 __ br(Assembler::overflowSet, true, Assembler::pn, skip);
duke@435 584 __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
duke@435 585 __ bind(skip);
duke@435 586
duke@435 587 if (op->code() == lir_irem) {
duke@435 588 if (Rdivisor != noreg) {
duke@435 589 __ smul(Rscratch, Rdivisor, Rscratch);
duke@435 590 } else {
duke@435 591 __ smul(Rscratch, divisor, Rscratch);
duke@435 592 }
duke@435 593 __ sub(Rdividend, Rscratch, Rresult);
duke@435 594 }
duke@435 595 }
duke@435 596
duke@435 597
duke@435 598 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
duke@435 599 #ifdef ASSERT
duke@435 600 assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
duke@435 601 if (op->block() != NULL) _branch_target_blocks.append(op->block());
duke@435 602 if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
duke@435 603 #endif
duke@435 604 assert(op->info() == NULL, "shouldn't have CodeEmitInfo");
duke@435 605
duke@435 606 if (op->cond() == lir_cond_always) {
duke@435 607 __ br(Assembler::always, false, Assembler::pt, *(op->label()));
duke@435 608 } else if (op->code() == lir_cond_float_branch) {
duke@435 609 assert(op->ublock() != NULL, "must have unordered successor");
duke@435 610 bool is_unordered = (op->ublock() == op->block());
duke@435 611 Assembler::Condition acond;
duke@435 612 switch (op->cond()) {
duke@435 613 case lir_cond_equal: acond = Assembler::f_equal; break;
duke@435 614 case lir_cond_notEqual: acond = Assembler::f_notEqual; break;
duke@435 615 case lir_cond_less: acond = (is_unordered ? Assembler::f_unorderedOrLess : Assembler::f_less); break;
duke@435 616 case lir_cond_greater: acond = (is_unordered ? Assembler::f_unorderedOrGreater : Assembler::f_greater); break;
duke@435 617 case lir_cond_lessEqual: acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual : Assembler::f_lessOrEqual); break;
duke@435 618 case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
duke@435 619 default : ShouldNotReachHere();
duke@435 620 };
duke@435 621
duke@435 622 if (!VM_Version::v9_instructions_work()) {
duke@435 623 __ nop();
duke@435 624 }
duke@435 625 __ fb( acond, false, Assembler::pn, *(op->label()));
duke@435 626 } else {
duke@435 627 assert (op->code() == lir_branch, "just checking");
duke@435 628
duke@435 629 Assembler::Condition acond;
duke@435 630 switch (op->cond()) {
duke@435 631 case lir_cond_equal: acond = Assembler::equal; break;
duke@435 632 case lir_cond_notEqual: acond = Assembler::notEqual; break;
duke@435 633 case lir_cond_less: acond = Assembler::less; break;
duke@435 634 case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
duke@435 635 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
duke@435 636 case lir_cond_greater: acond = Assembler::greater; break;
duke@435 637 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break;
duke@435 638 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break;
duke@435 639 default: ShouldNotReachHere();
duke@435 640 };
duke@435 641
duke@435 642 // sparc has different condition codes for testing 32-bit
duke@435 643 // vs. 64-bit values. We could always test xcc if we could
duke@435 644 // guarantee that 32-bit loads always sign extend, but that isn't
duke@435 645 // true, and since sign extension isn't free it would impose a
duke@435 646 // slight cost.
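// For example, an int loaded zero-extended with a negative 32-bit value
// (say 0x80000000) looks positive to xcc but negative to icc, so T_INT
// compares must branch on the 32-bit condition codes.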
duke@435 647 #ifdef _LP64
duke@435 648 if (op->type() == T_INT) {
duke@435 649 __ br(acond, false, Assembler::pn, *(op->label()));
duke@435 650 } else
duke@435 651 #endif
duke@435 652 __ brx(acond, false, Assembler::pn, *(op->label()));
duke@435 653 }
duke@435 654 // The peephole pass fills the delay slot
duke@435 655 }
duke@435 656
duke@435 657
duke@435 658 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
duke@435 659 Bytecodes::Code code = op->bytecode();
duke@435 660 LIR_Opr dst = op->result_opr();
duke@435 661
duke@435 662 switch(code) {
duke@435 663 case Bytecodes::_i2l: {
duke@435 664 Register rlo = dst->as_register_lo();
duke@435 665 Register rhi = dst->as_register_hi();
duke@435 666 Register rval = op->in_opr()->as_register();
duke@435 667 #ifdef _LP64
duke@435 668 __ sra(rval, 0, rlo);
duke@435 669 #else
duke@435 670 __ mov(rval, rlo);
duke@435 671 __ sra(rval, BitsPerInt-1, rhi);
duke@435 672 #endif
duke@435 673 break;
duke@435 674 }
duke@435 675 case Bytecodes::_i2d:
duke@435 676 case Bytecodes::_i2f: {
duke@435 677 bool is_double = (code == Bytecodes::_i2d);
duke@435 678 FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
duke@435 679 FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
duke@435 680 FloatRegister rsrc = op->in_opr()->as_float_reg();
duke@435 681 if (rsrc != rdst) {
duke@435 682 __ fmov(FloatRegisterImpl::S, rsrc, rdst);
duke@435 683 }
duke@435 684 __ fitof(w, rdst, rdst);
duke@435 685 break;
duke@435 686 }
duke@435 687 case Bytecodes::_f2i:{
duke@435 688 FloatRegister rsrc = op->in_opr()->as_float_reg();
duke@435 689 Address addr = frame_map()->address_for_slot(dst->single_stack_ix());
duke@435 690 Label L;
duke@435 691 // result must be 0 if value is NaN; test by comparing value to itself
duke@435 692 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
duke@435 693 if (!VM_Version::v9_instructions_work()) {
duke@435 694 __ nop();
duke@435 695 }
duke@435 696 __ fb(Assembler::f_unordered, true, Assembler::pn, L);
duke@435 697 __ delayed()->st(G0, addr); // annulled if contents of rsrc is not NaN
duke@435 698 __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
duke@435 699 // move integer result from float register to int register
duke@435 700 __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
duke@435 701 __ bind (L);
duke@435 702 break;
duke@435 703 }
duke@435 704 case Bytecodes::_l2i: {
duke@435 705 Register rlo = op->in_opr()->as_register_lo();
duke@435 706 Register rhi = op->in_opr()->as_register_hi();
duke@435 707 Register rdst = dst->as_register();
duke@435 708 #ifdef _LP64
duke@435 709 __ sra(rlo, 0, rdst);
duke@435 710 #else
duke@435 711 __ mov(rlo, rdst);
duke@435 712 #endif
duke@435 713 break;
duke@435 714 }
duke@435 715 case Bytecodes::_d2f:
duke@435 716 case Bytecodes::_f2d: {
duke@435 717 bool is_double = (code == Bytecodes::_f2d);
duke@435 718 assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
duke@435 719 LIR_Opr val = op->in_opr();
duke@435 720 FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
duke@435 721 FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
duke@435 722 FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
duke@435 723 FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
duke@435 724 __ ftof(vw, dw, rval, rdst);
duke@435 725 break;
duke@435 726 }
duke@435 727 case Bytecodes::_i2s:
duke@435 728 case Bytecodes::_i2b: {
duke@435 729 Register rval = op->in_opr()->as_register();
duke@435 730 Register rdst = dst->as_register();
duke@435 731 int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
duke@435 732 __ sll (rval, shift, rdst);
duke@435 733 __ sra (rdst, shift, rdst);
duke@435 734 break;
duke@435 735 }
duke@435 736 case Bytecodes::_i2c: {
duke@435 737 Register rval = op->in_opr()->as_register();
duke@435 738 Register rdst = dst->as_register();
duke@435 739 int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte;
duke@435 740 __ sll (rval, shift, rdst);
duke@435 741 __ srl (rdst, shift, rdst);
duke@435 742 break;
duke@435 743 }
duke@435 744
duke@435 745 default: ShouldNotReachHere();
duke@435 746 }
duke@435 747 }
duke@435 748
duke@435 749
duke@435 750 void LIR_Assembler::align_call(LIR_Code) {
duke@435 751 // do nothing since all instructions are word aligned on sparc
duke@435 752 }
duke@435 753
duke@435 754
twisti@1730 755 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
twisti@1730 756 __ call(op->addr(), rtype);
twisti@1919 757 // The peephole pass fills the delay slot, add_call_info is done in
twisti@1919 758 // LIR_Assembler::emit_delay.
duke@435 759 }
duke@435 760
duke@435 761
twisti@1730 762 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
duke@435 763 RelocationHolder rspec = virtual_call_Relocation::spec(pc());
duke@435 764 __ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
duke@435 765 __ relocate(rspec);
twisti@1730 766 __ call(op->addr(), relocInfo::none);
twisti@1919 767 // The peephole pass fills the delay slot, add_call_info is done in
twisti@1919 768 // LIR_Assembler::emit_delay.
duke@435 769 }
duke@435 770
duke@435 771
twisti@1730 772 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
twisti@1730 773 add_debug_info_for_null_check_here(op->info());
iveresov@2344 774 __ load_klass(O0, G3_scratch);
twisti@1730 775 if (__ is_simm13(op->vtable_offset())) {
twisti@1730 776 __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
duke@435 777 } else {
duke@435 778 // This will generate 2 instructions
twisti@1730 779 __ set(op->vtable_offset(), G5_method);
duke@435 780 // ld_ptr, set_hi, set
duke@435 781 __ ld_ptr(G3_scratch, G5_method, G5_method);
duke@435 782 }
twisti@1162 783 __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3_scratch);
duke@435 784 __ callr(G3_scratch, G0);
duke@435 785 // the peephole pass fills the delay slot
duke@435 786 }
duke@435 787
iveresov@2344 788 int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
duke@435 789 int store_offset;
duke@435 790 if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
duke@435 791 assert(!unaligned, "can't handle this");
duke@435 792 // for offsets larger than a simm13 we set up the offset in O7
twisti@1162 793 __ set(offset, O7);
iveresov@2344 794 store_offset = store(from_reg, base, O7, type, wide);
duke@435 795 } else {
iveresov@2344 796 if (type == T_ARRAY || type == T_OBJECT) {
iveresov@2344 797 __ verify_oop(from_reg->as_register());
iveresov@2344 798 }
duke@435 799 store_offset = code_offset();
duke@435 800 switch (type) {
duke@435 801 case T_BOOLEAN: // fall through
duke@435 802 case T_BYTE : __ stb(from_reg->as_register(), base, offset); break;
duke@435 803 case T_CHAR : __ sth(from_reg->as_register(), base, offset); break;
duke@435 804 case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
duke@435 805 case T_INT : __ stw(from_reg->as_register(), base, offset); break;
duke@435 806 case T_LONG :
duke@435 807 #ifdef _LP64
duke@435 808 if (unaligned || PatchALot) {
duke@435 809 __ srax(from_reg->as_register_lo(), 32, O7);
duke@435 810 __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
duke@435 811 __ stw(O7, base, offset + hi_word_offset_in_bytes);
duke@435 812 } else {
duke@435 813 __ stx(from_reg->as_register_lo(), base, offset);
duke@435 814 }
duke@435 815 #else
duke@435 816 assert(Assembler::is_simm13(offset + 4), "must be");
duke@435 817 __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
duke@435 818 __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
duke@435 819 #endif
duke@435 820 break;
iveresov@2344 821 case T_ADDRESS:
iveresov@2344 822 __ st_ptr(from_reg->as_register(), base, offset);
iveresov@2344 823 break;
duke@435 824 case T_ARRAY : // fall through
iveresov@2344 825 case T_OBJECT:
iveresov@2344 826 {
iveresov@2344 827 if (UseCompressedOops && !wide) {
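// encode_heap_oop compresses the 64-bit oop into a 32-bit narrow oop
// (roughly (oop - heap_base) >> LogMinObjAlignmentInBytes), which is
// why a 32-bit stw suffices here.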
iveresov@2344 828 __ encode_heap_oop(from_reg->as_register(), G3_scratch);
iveresov@2344 829 store_offset = code_offset();
iveresov@2344 830 __ stw(G3_scratch, base, offset);
iveresov@2344 831 } else {
iveresov@2344 832 __ st_ptr(from_reg->as_register(), base, offset);
iveresov@2344 833 }
iveresov@2344 834 break;
iveresov@2344 835 }
iveresov@2344 836
duke@435 837 case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
duke@435 838 case T_DOUBLE:
duke@435 839 {
duke@435 840 FloatRegister reg = from_reg->as_double_reg();
duke@435 841 // split unaligned stores
duke@435 842 if (unaligned || PatchALot) {
duke@435 843 assert(Assembler::is_simm13(offset + 4), "must be");
duke@435 844 __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
duke@435 845 __ stf(FloatRegisterImpl::S, reg, base, offset);
duke@435 846 } else {
duke@435 847 __ stf(FloatRegisterImpl::D, reg, base, offset);
duke@435 848 }
duke@435 849 break;
duke@435 850 }
duke@435 851 default : ShouldNotReachHere();
duke@435 852 }
duke@435 853 }
duke@435 854 return store_offset;
duke@435 855 }
duke@435 856
duke@435 857
iveresov@2344 858 int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
iveresov@2344 859 if (type == T_ARRAY || type == T_OBJECT) {
iveresov@2344 860 __ verify_oop(from_reg->as_register());
iveresov@2344 861 }
duke@435 862 int store_offset = code_offset();
duke@435 863 switch (type) {
duke@435 864 case T_BOOLEAN: // fall through
duke@435 865 case T_BYTE : __ stb(from_reg->as_register(), base, disp); break;
duke@435 866 case T_CHAR : __ sth(from_reg->as_register(), base, disp); break;
duke@435 867 case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
duke@435 868 case T_INT : __ stw(from_reg->as_register(), base, disp); break;
duke@435 869 case T_LONG :
duke@435 870 #ifdef _LP64
duke@435 871 __ stx(from_reg->as_register_lo(), base, disp);
duke@435 872 #else
duke@435 873 assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
duke@435 874 __ std(from_reg->as_register_hi(), base, disp);
duke@435 875 #endif
duke@435 876 break;
iveresov@2344 877 case T_ADDRESS:
iveresov@2344 878 __ st_ptr(from_reg->as_register(), base, disp);
iveresov@2344 879 break;
duke@435 880 case T_ARRAY : // fall through
iveresov@2344 881 case T_OBJECT:
iveresov@2344 882 {
iveresov@2344 883 if (UseCompressedOops && !wide) {
iveresov@2344 884 __ encode_heap_oop(from_reg->as_register(), G3_scratch);
iveresov@2344 885 store_offset = code_offset();
iveresov@2344 886 __ stw(G3_scratch, base, disp);
iveresov@2344 887 } else {
iveresov@2344 888 __ st_ptr(from_reg->as_register(), base, disp);
iveresov@2344 889 }
iveresov@2344 890 break;
iveresov@2344 891 }
duke@435 892 case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break;
duke@435 893 case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
duke@435 894 default : ShouldNotReachHere();
duke@435 895 }
duke@435 896 return store_offset;
duke@435 897 }
duke@435 898
duke@435 899
iveresov@2344 900 int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
duke@435 901 int load_offset;
duke@435 902 if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
duke@435 903 assert(base != O7, "destroying register");
duke@435 904 assert(!unaligned, "can't handle this");
duke@435 905 // for offsets larger than a simm13 we set up the offset in O7
twisti@1162 906 __ set(offset, O7);
iveresov@2344 907 load_offset = load(base, O7, to_reg, type, wide);
duke@435 908 } else {
duke@435 909 load_offset = code_offset();
duke@435 910 switch(type) {
duke@435 911 case T_BOOLEAN: // fall through
duke@435 912 case T_BYTE : __ ldsb(base, offset, to_reg->as_register()); break;
duke@435 913 case T_CHAR : __ lduh(base, offset, to_reg->as_register()); break;
duke@435 914 case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break;
duke@435 915 case T_INT : __ ld(base, offset, to_reg->as_register()); break;
duke@435 916 case T_LONG :
duke@435 917 if (!unaligned) {
duke@435 918 #ifdef _LP64
duke@435 919 __ ldx(base, offset, to_reg->as_register_lo());
duke@435 920 #else
duke@435 921 assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
duke@435 922 "must be sequential");
duke@435 923 __ ldd(base, offset, to_reg->as_register_hi());
duke@435 924 #endif
duke@435 925 } else {
duke@435 926 #ifdef _LP64
duke@435 927 assert(base != to_reg->as_register_lo(), "can't handle this");
roland@1495 928 assert(O7 != to_reg->as_register_lo(), "can't handle this");
duke@435 929 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
roland@1495 930 __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
duke@435 931 __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
roland@1495 932 __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
duke@435 933 #else
duke@435 934 if (base == to_reg->as_register_lo()) {
duke@435 935 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
duke@435 936 __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
duke@435 937 } else {
duke@435 938 __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
duke@435 939 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
duke@435 940 }
duke@435 941 #endif
duke@435 942 }
duke@435 943 break;
iveresov@2344 944 case T_ADDRESS: __ ld_ptr(base, offset, to_reg->as_register()); break;
duke@435 945 case T_ARRAY : // fall through
iveresov@2344 946 case T_OBJECT:
iveresov@2344 947 {
iveresov@2344 948 if (UseCompressedOops && !wide) {
iveresov@2344 949 __ lduw(base, offset, to_reg->as_register());
iveresov@2344 950 __ decode_heap_oop(to_reg->as_register());
iveresov@2344 951 } else {
iveresov@2344 952 __ ld_ptr(base, offset, to_reg->as_register());
iveresov@2344 953 }
iveresov@2344 954 break;
iveresov@2344 955 }
duke@435 956 case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
duke@435 957 case T_DOUBLE:
duke@435 958 {
duke@435 959 FloatRegister reg = to_reg->as_double_reg();
duke@435 960 // split unaligned loads
duke@435 961 if (unaligned || PatchALot) {
roland@1495 962 __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
roland@1495 963 __ ldf(FloatRegisterImpl::S, base, offset, reg);
duke@435 964 } else {
duke@435 965 __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
duke@435 966 }
duke@435 967 break;
duke@435 968 }
duke@435 969 default : ShouldNotReachHere();
duke@435 970 }
iveresov@2344 971 if (type == T_ARRAY || type == T_OBJECT) {
iveresov@2344 972 __ verify_oop(to_reg->as_register());
iveresov@2344 973 }
duke@435 974 }
duke@435 975 return load_offset;
duke@435 976 }
duke@435 977
duke@435 978
iveresov@2344 979 int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
duke@435 980 int load_offset = code_offset();
duke@435 981 switch(type) {
duke@435 982 case T_BOOLEAN: // fall through
iveresov@2344 983 case T_BYTE : __ ldsb(base, disp, to_reg->as_register()); break;
iveresov@2344 984 case T_CHAR : __ lduh(base, disp, to_reg->as_register()); break;
iveresov@2344 985 case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
iveresov@2344 986 case T_INT : __ ld(base, disp, to_reg->as_register()); break;
iveresov@2344 987 case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
duke@435 988 case T_ARRAY : // fall through
iveresov@2344 989 case T_OBJECT:
iveresov@2344 990 {
iveresov@2344 991 if (UseCompressedOops && !wide) {
iveresov@2344 992 __ lduw(base, disp, to_reg->as_register());
iveresov@2344 993 __ decode_heap_oop(to_reg->as_register());
iveresov@2344 994 } else {
iveresov@2344 995 __ ld_ptr(base, disp, to_reg->as_register());
iveresov@2344 996 }
iveresov@2344 997 break;
iveresov@2344 998 }
duke@435 999 case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
duke@435 1000 case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
duke@435 1001 case T_LONG :
duke@435 1002 #ifdef _LP64
duke@435 1003 __ ldx(base, disp, to_reg->as_register_lo());
duke@435 1004 #else
duke@435 1005 assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
duke@435 1006 "must be sequential");
duke@435 1007 __ ldd(base, disp, to_reg->as_register_hi());
duke@435 1008 #endif
duke@435 1009 break;
duke@435 1010 default : ShouldNotReachHere();
duke@435 1011 }
iveresov@2344 1012 if (type == T_ARRAY || type == T_OBJECT) {
iveresov@2344 1013 __ verify_oop(to_reg->as_register());
iveresov@2344 1014 }
duke@435 1015 return load_offset;
duke@435 1016 }
duke@435 1017
duke@435 1018 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
duke@435 1019 LIR_Const* c = src->as_constant_ptr();
duke@435 1020 switch (c->type()) {
duke@435 1021 case T_INT:
iveresov@2344 1022 case T_FLOAT: {
iveresov@2344 1023 Register src_reg = O7;
iveresov@2344 1024 int value = c->as_jint_bits();
iveresov@2344 1025 if (value == 0) {
iveresov@2344 1026 src_reg = G0;
iveresov@2344 1027 } else {
iveresov@2344 1028 __ set(value, O7);
iveresov@2344 1029 }
iveresov@2344 1030 Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
iveresov@2344 1031 __ stw(src_reg, addr.base(), addr.disp());
iveresov@2344 1032 break;
iveresov@2344 1033 }
roland@1732 1034 case T_ADDRESS: {
duke@435 1035 Register src_reg = O7;
duke@435 1036 int value = c->as_jint_bits();
duke@435 1037 if (value == 0) {
duke@435 1038 src_reg = G0;
duke@435 1039 } else {
duke@435 1040 __ set(value, O7);
duke@435 1041 }
duke@435 1042 Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
iveresov@2344 1043 __ st_ptr(src_reg, addr.base(), addr.disp());
duke@435 1044 break;
duke@435 1045 }
duke@435 1046 case T_OBJECT: {
duke@435 1047 Register src_reg = O7;
duke@435 1048 jobject2reg(c->as_jobject(), src_reg);
duke@435 1049 Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
duke@435 1050 __ st_ptr(src_reg, addr.base(), addr.disp());
duke@435 1051 break;
duke@435 1052 }
duke@435 1053 case T_LONG:
duke@435 1054 case T_DOUBLE: {
duke@435 1055 Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());
duke@435 1056
duke@435 1057 Register tmp = O7;
duke@435 1058 int value_lo = c->as_jint_lo_bits();
duke@435 1059 if (value_lo == 0) {
duke@435 1060 tmp = G0;
duke@435 1061 } else {
duke@435 1062 __ set(value_lo, O7);
duke@435 1063 }
duke@435 1064 __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes);
duke@435 1065 int value_hi = c->as_jint_hi_bits();
duke@435 1066 if (value_hi == 0) {
duke@435 1067 tmp = G0;
duke@435 1068 } else {
duke@435 1069 __ set(value_hi, O7);
duke@435 1070 }
duke@435 1071 __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes);
duke@435 1072 break;
duke@435 1073 }
duke@435 1074 default:
duke@435 1075 Unimplemented();
duke@435 1076 }
duke@435 1077 }
duke@435 1078
duke@435 1079
iveresov@2344 1080 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
duke@435 1081 LIR_Const* c = src->as_constant_ptr();
duke@435 1082 LIR_Address* addr = dest->as_address_ptr();
duke@435 1083 Register base = addr->base()->as_pointer_register();
iveresov@2344 1084 int offset = -1;
iveresov@2344 1085
duke@435 1086 switch (c->type()) {
duke@435 1087 case T_INT:
roland@1732 1088 case T_FLOAT:
roland@1732 1089 case T_ADDRESS: {
duke@435 1090 LIR_Opr tmp = FrameMap::O7_opr;
duke@435 1091 int value = c->as_jint_bits();
duke@435 1092 if (value == 0) {
duke@435 1093 tmp = FrameMap::G0_opr;
duke@435 1094 } else {
duke@435 1095 __ set(value, O7);
duke@435 1096 }
duke@435 1097 if (addr->index()->is_valid()) {
duke@435 1098 assert(addr->disp() == 0, "must be zero");
iveresov@2344 1099 offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
duke@435 1100 } else {
duke@435 1101 assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
iveresov@2344 1102 offset = store(tmp, base, addr->disp(), type, wide, false);
duke@435 1103 }
duke@435 1104 break;
duke@435 1105 }
duke@435 1106 case T_LONG:
duke@435 1107 case T_DOUBLE: {
duke@435 1108 assert(!addr->index()->is_valid(), "can't handle reg reg address here");
duke@435 1109 assert(Assembler::is_simm13(addr->disp()) &&
duke@435 1110 Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");
duke@435 1111
iveresov@2344 1112 LIR_Opr tmp = FrameMap::O7_opr;
duke@435 1113 int value_lo = c->as_jint_lo_bits();
duke@435 1114 if (value_lo == 0) {
iveresov@2344 1115 tmp = FrameMap::G0_opr;
duke@435 1116 } else {
duke@435 1117 __ set(value_lo, O7);
duke@435 1118 }
iveresov@2344 1119 offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false);
duke@435 1120 int value_hi = c->as_jint_hi_bits();
duke@435 1121 if (value_hi == 0) {
iveresov@2344 1122 tmp = FrameMap::G0_opr;
duke@435 1123 } else {
duke@435 1124 __ set(value_hi, O7);
duke@435 1125 }
iveresov@2344 1126 offset = store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false);
duke@435 1127 break;
duke@435 1128 }
duke@435 1129 case T_OBJECT: {
duke@435 1130 jobject obj = c->as_jobject();
duke@435 1131 LIR_Opr tmp;
duke@435 1132 if (obj == NULL) {
duke@435 1133 tmp = FrameMap::G0_opr;
duke@435 1134 } else {
duke@435 1135 tmp = FrameMap::O7_opr;
duke@435 1136 jobject2reg(c->as_jobject(), O7);
duke@435 1137 }
duke@435 1138 // handle either reg+reg or reg+disp address
duke@435 1139 if (addr->index()->is_valid()) {
duke@435 1140 assert(addr->disp() == 0, "must be zero");
iveresov@2344 1141 offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
duke@435 1142 } else {
duke@435 1143 assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
iveresov@2344 1144 offset = store(tmp, base, addr->disp(), type, wide, false);
duke@435 1145 }
duke@435 1146
duke@435 1147 break;
duke@435 1148 }
duke@435 1149 default:
duke@435 1150 Unimplemented();
duke@435 1151 }
iveresov@2344 1152 if (info != NULL) {
iveresov@2344 1153 assert(offset != -1, "offset should've been set");
iveresov@2344 1154 add_debug_info_for_null_check(offset, info);
iveresov@2344 1155 }
duke@435 1156 }
duke@435 1157
duke@435 1158
duke@435 1159 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
duke@435 1160 LIR_Const* c = src->as_constant_ptr();
duke@435 1161 LIR_Opr to_reg = dest;
duke@435 1162
duke@435 1163 switch (c->type()) {
duke@435 1164 case T_INT:
roland@1732 1165 case T_ADDRESS:
duke@435 1166 {
duke@435 1167 jint con = c->as_jint();
duke@435 1168 if (to_reg->is_single_cpu()) {
duke@435 1169 assert(patch_code == lir_patch_none, "no patching handled here");
duke@435 1170 __ set(con, to_reg->as_register());
duke@435 1171 } else {
duke@435 1172 ShouldNotReachHere();
duke@435 1173 assert(to_reg->is_single_fpu(), "wrong register kind");
duke@435 1174
duke@435 1175 __ set(con, O7);
twisti@1162 1176 Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
duke@435 1177 __ st(O7, temp_slot);
duke@435 1178 __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
duke@435 1179 }
duke@435 1180 }
duke@435 1181 break;
duke@435 1182
duke@435 1183 case T_LONG:
duke@435 1184 {
duke@435 1185 jlong con = c->as_jlong();
duke@435 1186
duke@435 1187 if (to_reg->is_double_cpu()) {
duke@435 1188 #ifdef _LP64
duke@435 1189 __ set(con, to_reg->as_register_lo());
duke@435 1190 #else
duke@435 1191 __ set(low(con), to_reg->as_register_lo());
duke@435 1192 __ set(high(con), to_reg->as_register_hi());
duke@435 1193 #endif
duke@435 1194 #ifdef _LP64
duke@435 1195 } else if (to_reg->is_single_cpu()) {
duke@435 1196 __ set(con, to_reg->as_register());
duke@435 1197 #endif
duke@435 1198 } else {
duke@435 1199 ShouldNotReachHere();
duke@435 1200 assert(to_reg->is_double_fpu(), "wrong register kind");
twisti@1162 1201 Address temp_slot_lo(SP, ((frame::register_save_words ) * wordSize) + STACK_BIAS);
twisti@1162 1202 Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
duke@435 1203 __ set(low(con), O7);
duke@435 1204 __ st(O7, temp_slot_lo);
duke@435 1205 __ set(high(con), O7);
duke@435 1206 __ st(O7, temp_slot_hi);
duke@435 1207 __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg());
duke@435 1208 }
duke@435 1209 }
duke@435 1210 break;
duke@435 1211
duke@435 1212 case T_OBJECT:
duke@435 1213 {
duke@435 1214 if (patch_code == lir_patch_none) {
duke@435 1215 jobject2reg(c->as_jobject(), to_reg->as_register());
duke@435 1216 } else {
duke@435 1217 jobject2reg_with_patching(to_reg->as_register(), info);
duke@435 1218 }
duke@435 1219 }
duke@435 1220 break;
duke@435 1221
duke@435 1222 case T_FLOAT:
duke@435 1223 {
duke@435 1224 address const_addr = __ float_constant(c->as_jfloat());
duke@435 1225 if (const_addr == NULL) {
duke@435 1226 bailout("const section overflow");
duke@435 1227 break;
duke@435 1228 }
duke@435 1229 RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
twisti@1162 1230 AddressLiteral const_addrlit(const_addr, rspec);
duke@435 1231 if (to_reg->is_single_fpu()) {
twisti@1162 1232 __ patchable_sethi(const_addrlit, O7);
duke@435 1233 __ relocate(rspec);
twisti@1162 1234 __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());
duke@435 1235
duke@435 1236 } else {
duke@435 1237 assert(to_reg->is_single_cpu(), "Must be a cpu register.");
duke@435 1238
twisti@1162 1239 __ set(const_addrlit, O7);
iveresov@2344 1240 __ ld(O7, 0, to_reg->as_register());
duke@435 1241 }
duke@435 1242 }
duke@435 1243 break;
duke@435 1244
duke@435 1245 case T_DOUBLE:
duke@435 1246 {
duke@435 1247 address const_addr = __ double_constant(c->as_jdouble());
duke@435 1248 if (const_addr == NULL) {
duke@435 1249 bailout("const section overflow");
duke@435 1250 break;
duke@435 1251 }
duke@435 1252 RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
duke@435 1253
duke@435 1254 if (to_reg->is_double_fpu()) {
twisti@1162 1255 AddressLiteral const_addrlit(const_addr, rspec);
twisti@1162 1256 __ patchable_sethi(const_addrlit, O7);
duke@435 1257 __ relocate(rspec);
twisti@1162 1258 __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
duke@435 1259 } else {
duke@435 1260 assert(to_reg->is_double_cpu(), "Must be a long register.");
duke@435 1261 #ifdef _LP64
duke@435 1262 __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
duke@435 1263 #else
duke@435 1264 __ set(low(jlong_cast(c->as_jdouble())), to_reg->as_register_lo());
duke@435 1265 __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
duke@435 1266 #endif
duke@435 1267 }
duke@435 1268
duke@435 1269 }
duke@435 1270 break;
duke@435 1271
duke@435 1272 default:
duke@435 1273 ShouldNotReachHere();
duke@435 1274 }
duke@435 1275 }
duke@435 1276
duke@435 1277 Address LIR_Assembler::as_Address(LIR_Address* addr) {
duke@435 1278 Register reg = addr->base()->as_register();
twisti@1162 1279 return Address(reg, addr->disp());
duke@435 1280 }
duke@435 1281
duke@435 1282
duke@435 1283 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
duke@435 1284 switch (type) {
duke@435 1285 case T_INT:
duke@435 1286 case T_FLOAT: {
duke@435 1287 Register tmp = O7;
duke@435 1288 Address from = frame_map()->address_for_slot(src->single_stack_ix());
duke@435 1289 Address to = frame_map()->address_for_slot(dest->single_stack_ix());
duke@435 1290 __ lduw(from.base(), from.disp(), tmp);
duke@435 1291 __ stw(tmp, to.base(), to.disp());
duke@435 1292 break;
duke@435 1293 }
duke@435 1294 case T_OBJECT: {
duke@435 1295 Register tmp = O7;
duke@435 1296 Address from = frame_map()->address_for_slot(src->single_stack_ix());
duke@435 1297 Address to = frame_map()->address_for_slot(dest->single_stack_ix());
duke@435 1298 __ ld_ptr(from.base(), from.disp(), tmp);
duke@435 1299 __ st_ptr(tmp, to.base(), to.disp());
duke@435 1300 break;
duke@435 1301 }
duke@435 1302 case T_LONG:
duke@435 1303 case T_DOUBLE: {
duke@435 1304 Register tmp = O7;
duke@435 1305 Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
duke@435 1306 Address to = frame_map()->address_for_double_slot(dest->double_stack_ix());
duke@435 1307 __ lduw(from.base(), from.disp(), tmp);
duke@435 1308 __ stw(tmp, to.base(), to.disp());
duke@435 1309 __ lduw(from.base(), from.disp() + 4, tmp);
duke@435 1310 __ stw(tmp, to.base(), to.disp() + 4);
duke@435 1311 break;
duke@435 1312 }
duke@435 1313
duke@435 1314 default:
duke@435 1315 ShouldNotReachHere();
duke@435 1316 }
duke@435 1317 }
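
A minimal, self-contained sketch of the T_LONG/T_DOUBLE case above: the 64-bit slot is moved as two 32-bit words through a scratch register (O7 plays the role of `tmp`). The helper name and offsets here are illustrative, not HotSpot code.

#include <cstdint>
#include <cstring>
#include <cassert>

// Copies a 64-bit stack slot as two 32-bit words, mirroring the
// lduw/stw pairs at disp and disp + 4 in the listing above.
static void copy_double_slot(const uint8_t* frame_from, uint8_t* frame_to,
                             int from_disp, int to_disp) {
  uint32_t tmp;                                      // plays the role of O7
  std::memcpy(&tmp, frame_from + from_disp, 4);      // lduw [from + disp], tmp
  std::memcpy(frame_to + to_disp, &tmp, 4);          // stw  tmp, [to + disp]
  std::memcpy(&tmp, frame_from + from_disp + 4, 4);  // lduw [from + disp + 4], tmp
  std::memcpy(frame_to + to_disp + 4, &tmp, 4);      // stw  tmp, [to + disp + 4]
}

int main() {
  uint8_t from[16] = {0}, to[16] = {0};
  uint64_t v = 0x1122334455667788ULL;
  std::memcpy(from + 8, &v, 8);
  copy_double_slot(from, to, 8, 8);
  assert(std::memcmp(from + 8, to + 8, 8) == 0);
}
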
duke@435 1318
duke@435 1319
duke@435 1320 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
duke@435 1321 Address base = as_Address(addr);
twisti@1162 1322 return Address(base.base(), base.disp() + hi_word_offset_in_bytes);
duke@435 1323 }
duke@435 1324
duke@435 1325
duke@435 1326 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
duke@435 1327 Address base = as_Address(addr);
twisti@1162 1328 return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
duke@435 1329 }
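
The hi/lo accessors just bias the same base address by per-platform word offsets. A self-contained sketch, assuming the big-endian layout SPARC uses (high word at the lower address); the offset values 0 and 4 are assumptions for illustration, the real constants come from shared C1 definitions.

#include <cstdint>
#include <cassert>

// Reconstructs a 32-bit word from big-endian bytes, independent of host order.
static uint32_t load_be32(const uint8_t* p) {
  return (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16) |
         (uint32_t(p[2]) << 8)  |  uint32_t(p[3]);
}

int main() {
  uint8_t mem[8];
  const uint64_t v = 0x0000000100000002ULL;                        // hi word 1, lo word 2
  for (int i = 0; i < 8; i++) mem[i] = uint8_t(v >> (56 - 8 * i)); // big-endian store
  const int hi_word_offset_in_bytes = 0;  // assumed big-endian values
  const int lo_word_offset_in_bytes = 4;
  assert(load_be32(mem + hi_word_offset_in_bytes) == 1);
  assert(load_be32(mem + lo_word_offset_in_bytes) == 2);
}
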
duke@435 1330
duke@435 1331
duke@435 1332 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
iveresov@2344 1333 LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {
duke@435 1334
duke@435 1335 LIR_Address* addr = src_opr->as_address_ptr();
duke@435 1336 LIR_Opr to_reg = dest;
duke@435 1337
duke@435 1338 Register src = addr->base()->as_pointer_register();
duke@435 1339 Register disp_reg = noreg;
duke@435 1340 int disp_value = addr->disp();
duke@435 1341 bool needs_patching = (patch_code != lir_patch_none);
duke@435 1342
duke@435 1343 if (addr->base()->type() == T_OBJECT) {
duke@435 1344 __ verify_oop(src);
duke@435 1345 }
duke@435 1346
duke@435 1347 PatchingStub* patch = NULL;
duke@435 1348 if (needs_patching) {
duke@435 1349 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
duke@435 1350 assert(!to_reg->is_double_cpu() ||
duke@435 1351 patch_code == lir_patch_none ||
duke@435 1352 patch_code == lir_patch_normal, "patching doesn't match register");
duke@435 1353 }
duke@435 1354
duke@435 1355 if (addr->index()->is_illegal()) {
duke@435 1356 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
duke@435 1357 if (needs_patching) {
twisti@1162 1358 __ patchable_set(0, O7);
duke@435 1359 } else {
duke@435 1360 __ set(disp_value, O7);
duke@435 1361 }
duke@435 1362 disp_reg = O7;
duke@435 1363 }
duke@435 1364 } else if (unaligned || PatchALot) {
duke@435 1365 __ add(src, addr->index()->as_register(), O7);
duke@435 1366 src = O7;
duke@435 1367 } else {
duke@435 1368 disp_reg = addr->index()->as_pointer_register();
duke@435 1369 assert(disp_value == 0, "can't handle 3 operand addresses");
duke@435 1370 }
duke@435 1371
duke@435 1372 // remember the offset of the load. The patching_epilog must be done
duke@435 1373 // before the call to add_debug_info, otherwise the PcDescs don't get
duke@435 1374 // entered in increasing order.
duke@435 1375 int offset = code_offset();
duke@435 1376
duke@435 1377 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
duke@435 1378 if (disp_reg == noreg) {
iveresov@2344 1379 offset = load(src, disp_value, to_reg, type, wide, unaligned);
duke@435 1380 } else {
duke@435 1381 assert(!unaligned, "can't handle this");
iveresov@2344 1382 offset = load(src, disp_reg, to_reg, type, wide);
duke@435 1383 }
duke@435 1384
duke@435 1385 if (patch != NULL) {
duke@435 1386 patching_epilog(patch, patch_code, src, info);
duke@435 1387 }
duke@435 1388 if (info != NULL) add_debug_info_for_null_check(offset, info);
duke@435 1389 }
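
The displacement logic above hinges on whether the offset fits a 13-bit signed immediate; anything wider is first materialized into O7. A hedged sketch of the range test (the real Assembler::is_simm13 is defined by the assembler):

#include <cassert>

// A simm13 field holds a signed 13-bit value: -2^12 .. 2^12 - 1.
static bool is_simm13(long x) { return -4096 <= x && x <= 4095; }

int main() {
  assert(is_simm13(0) && is_simm13(4095) && is_simm13(-4096));
  assert(!is_simm13(4096) && !is_simm13(-4097));  // these need a register
}
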
duke@435 1390
duke@435 1391
duke@435 1392 void LIR_Assembler::prefetchr(LIR_Opr src) {
duke@435 1393 LIR_Address* addr = src->as_address_ptr();
duke@435 1394 Address from_addr = as_Address(addr);
duke@435 1395
duke@435 1396 if (VM_Version::has_v9()) {
duke@435 1397 __ prefetch(from_addr, Assembler::severalReads);
duke@435 1398 }
duke@435 1399 }
duke@435 1400
duke@435 1401
duke@435 1402 void LIR_Assembler::prefetchw(LIR_Opr src) {
duke@435 1403 LIR_Address* addr = src->as_address_ptr();
duke@435 1404 Address from_addr = as_Address(addr);
duke@435 1405
duke@435 1406 if (VM_Version::has_v9()) {
duke@435 1407 __ prefetch(from_addr, Assembler::severalWritesAndPossiblyReads);
duke@435 1408 }
duke@435 1409 }
duke@435 1410
duke@435 1411
duke@435 1412 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
duke@435 1413 Address addr;
duke@435 1414 if (src->is_single_word()) {
duke@435 1415 addr = frame_map()->address_for_slot(src->single_stack_ix());
duke@435 1416 } else if (src->is_double_word()) {
duke@435 1417 addr = frame_map()->address_for_double_slot(src->double_stack_ix());
duke@435 1418 }
duke@435 1419
duke@435 1420 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
iveresov@2344 1421 load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
duke@435 1422 }
duke@435 1423
duke@435 1424
duke@435 1425 void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
duke@435 1426 Address addr;
duke@435 1427 if (dest->is_single_word()) {
duke@435 1428 addr = frame_map()->address_for_slot(dest->single_stack_ix());
duke@435 1429 } else if (dest->is_double_word()) {
duke@435 1430 addr = frame_map()->address_for_slot(dest->double_stack_ix());
duke@435 1431 }
duke@435 1432 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
iveresov@2344 1433 store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
duke@435 1434 }
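
Both stack2reg and reg2stack derive `unaligned` the same way: strip the V9 stack bias from the displacement and test 8-byte alignment, since only aligned slots can use a single 64-bit access. A small sketch; 2047 is the V9 bias, and on 32-bit SPARC STACK_BIAS is 0.

#include <cassert>

int main() {
  const int STACK_BIAS = 2047;  // SPARC V9 register-window stack bias
  auto unaligned = [&](int disp) { return (disp - STACK_BIAS) % 8 != 0; };
  assert(!unaligned(STACK_BIAS + 16));  // slot on an 8-byte boundary
  assert( unaligned(STACK_BIAS + 20));  // slot on a 4-byte boundary only
}
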
duke@435 1435
duke@435 1436
duke@435 1437 void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
duke@435 1438 if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
duke@435 1439 if (from_reg->is_double_fpu()) {
duke@435 1440 // double to double moves
duke@435 1441 assert(to_reg->is_double_fpu(), "should match");
duke@435 1442 __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg());
duke@435 1443 } else {
duke@435 1444 // float to float moves
duke@435 1445 assert(to_reg->is_single_fpu(), "should match");
duke@435 1446 __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg());
duke@435 1447 }
duke@435 1448 } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
duke@435 1449 if (from_reg->is_double_cpu()) {
duke@435 1450 #ifdef _LP64
duke@435 1451 __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
duke@435 1452 #else
duke@435 1453 assert(to_reg->is_double_cpu() &&
duke@435 1454 from_reg->as_register_hi() != to_reg->as_register_lo() &&
duke@435 1455 from_reg->as_register_lo() != to_reg->as_register_hi(),
duke@435 1456 "should both be long and not overlap");
duke@435 1457 // long to long moves
duke@435 1458 __ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
duke@435 1459 __ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
duke@435 1460 #endif
duke@435 1461 #ifdef _LP64
duke@435 1462 } else if (to_reg->is_double_cpu()) {
duke@435 1463 // int to int moves
duke@435 1464 __ mov(from_reg->as_register(), to_reg->as_register_lo());
duke@435 1465 #endif
duke@435 1466 } else {
duke@435 1467 // int to int moves
duke@435 1468 __ mov(from_reg->as_register(), to_reg->as_register());
duke@435 1469 }
duke@435 1470 } else {
duke@435 1471 ShouldNotReachHere();
duke@435 1472 }
duke@435 1473 if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
duke@435 1474 __ verify_oop(to_reg->as_register());
duke@435 1475 }
duke@435 1476 }
duke@435 1477
duke@435 1478
duke@435 1479 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
duke@435 1480 LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
iveresov@2344 1481 bool wide, bool unaligned) {
duke@435 1482 LIR_Address* addr = dest->as_address_ptr();
duke@435 1483
duke@435 1484 Register src = addr->base()->as_pointer_register();
duke@435 1485 Register disp_reg = noreg;
duke@435 1486 int disp_value = addr->disp();
duke@435 1487 bool needs_patching = (patch_code != lir_patch_none);
duke@435 1488
duke@435 1489 if (addr->base()->is_oop_register()) {
duke@435 1490 __ verify_oop(src);
duke@435 1491 }
duke@435 1492
duke@435 1493 PatchingStub* patch = NULL;
duke@435 1494 if (needs_patching) {
duke@435 1495 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
duke@435 1496 assert(!from_reg->is_double_cpu() ||
duke@435 1497 patch_code == lir_patch_none ||
duke@435 1498 patch_code == lir_patch_normal, "patching doesn't match register");
duke@435 1499 }
duke@435 1500
duke@435 1501 if (addr->index()->is_illegal()) {
duke@435 1502 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
duke@435 1503 if (needs_patching) {
twisti@1162 1504 __ patchable_set(0, O7);
duke@435 1505 } else {
duke@435 1506 __ set(disp_value, O7);
duke@435 1507 }
duke@435 1508 disp_reg = O7;
duke@435 1509 }
duke@435 1510 } else if (unaligned || PatchALot) {
duke@435 1511 __ add(src, addr->index()->as_register(), O7);
duke@435 1512 src = O7;
duke@435 1513 } else {
duke@435 1514 disp_reg = addr->index()->as_pointer_register();
duke@435 1515 assert(disp_value == 0, "can't handle 3 operand addresses");
duke@435 1516 }
duke@435 1517
duke@435 1518 // remember the offset of the store. The patching_epilog must be done
duke@435 1519 // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
duke@435 1520 // entered in increasing order.
duke@435 1521 int offset;
duke@435 1522
duke@435 1523 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
duke@435 1524 if (disp_reg == noreg) {
iveresov@2344 1525 offset = store(from_reg, src, disp_value, type, wide, unaligned);
duke@435 1526 } else {
duke@435 1527 assert(!unaligned, "can't handle this");
iveresov@2344 1528 offset = store(from_reg, src, disp_reg, type, wide);
duke@435 1529 }
duke@435 1530
duke@435 1531 if (patch != NULL) {
duke@435 1532 patching_epilog(patch, patch_code, src, info);
duke@435 1533 }
duke@435 1534
duke@435 1535 if (info != NULL) add_debug_info_for_null_check(offset, info);
duke@435 1536 }
duke@435 1537
duke@435 1538
duke@435 1539 void LIR_Assembler::return_op(LIR_Opr result) {
duke@435 1540 // the poll may need a register so just pick one that isn't the return register
iveresov@2138 1541 #if defined(TIERED) && !defined(_LP64)
duke@435 1542 if (result->type_field() == LIR_OprDesc::long_type) {
duke@435 1543 // Must move the result to G1
duke@435 1544 // Must leave the proper result in O0, O1 and G1 (TIERED only)
duke@435 1545 __ sllx(I0, 32, G1); // Shift bits into high G1
duke@435 1546 __ srl (I1, 0, I1); // Zero extend I1 (the caller's O1; harmless?)
duke@435 1547 __ or3 (I1, G1, G1); // OR 64 bits into G1
iveresov@2138 1548 #ifdef ASSERT
iveresov@2138 1549 // mangle it so any problems will show up
iveresov@2138 1550 __ set(0xdeadbeef, I0);
iveresov@2138 1551 __ set(0xdeadbeef, I1);
iveresov@2138 1552 #endif
duke@435 1553 }
duke@435 1554 #endif // TIERED
duke@435 1555 __ set((intptr_t)os::get_polling_page(), L0);
duke@435 1556 __ relocate(relocInfo::poll_return_type);
duke@435 1557 __ ld_ptr(L0, 0, G0);
duke@435 1558 __ ret();
duke@435 1559 __ delayed()->restore();
duke@435 1560 }
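
In the 32-bit TIERED path above, the 64-bit result is packed into G1 with a shift and an OR of the zero-extended low half. The same arithmetic in plain C++ (values are arbitrary):

#include <cstdint>
#include <cassert>

int main() {
  uint32_t hi = 0x11223344, lo = 0x55667788;          // I0, I1
  uint64_t g1 = (uint64_t(hi) << 32) | uint64_t(lo);  // sllx + srl + or3
  assert(g1 == 0x1122334455667788ULL);
}
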
duke@435 1561
duke@435 1562
duke@435 1563 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
duke@435 1564 __ set((intptr_t)os::get_polling_page(), tmp->as_register());
duke@435 1565 if (info != NULL) {
duke@435 1566 add_debug_info_for_branch(info);
duke@435 1567 } else {
duke@435 1568 __ relocate(relocInfo::poll_type);
duke@435 1569 }
duke@435 1570
duke@435 1571 int offset = __ offset();
duke@435 1572 __ ld_ptr(tmp->as_register(), 0, G0);
duke@435 1573
duke@435 1574 return offset;
duke@435 1575 }
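
The poll is just a load from the VM's polling page into G0: cheap while the page is readable, trapping once the VM protects it to force threads to a safepoint. A rough POSIX/Linux sketch of the idea, not HotSpot code; the real signal-handler plumbing is omitted.

#include <sys/mman.h>
#include <cstdint>
#include <cassert>

int main() {
  void* page = mmap(nullptr, 4096, PROT_READ,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(page != MAP_FAILED);
  // The poll: a plain load whose result is discarded (like ld_ptr into G0).
  volatile intptr_t sink = *(volatile intptr_t*)page;
  (void)sink;
  // mprotect(page, 4096, PROT_NONE) would make the same load fault, which
  // is how the VM diverts compiled code into the safepoint handler.
  munmap(page, 4096);
}
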
duke@435 1576
duke@435 1577
duke@435 1578 void LIR_Assembler::emit_static_call_stub() {
duke@435 1579 address call_pc = __ pc();
duke@435 1580 address stub = __ start_a_stub(call_stub_size);
duke@435 1581 if (stub == NULL) {
duke@435 1582 bailout("static call stub overflow");
duke@435 1583 return;
duke@435 1584 }
duke@435 1585
duke@435 1586 int start = __ offset();
duke@435 1587 __ relocate(static_stub_Relocation::spec(call_pc));
duke@435 1588
duke@435 1589 __ set_oop(NULL, G5);
duke@435 1590 // must be set to -1 at code generation time
twisti@1162 1591 AddressLiteral addrlit(-1);
twisti@1162 1592 __ jump_to(addrlit, G3);
duke@435 1593 __ delayed()->nop();
duke@435 1594
duke@435 1595 assert(__ offset() - start <= call_stub_size, "stub too big");
duke@435 1596 __ end_a_stub();
duke@435 1597 }
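
The stub body emitted above is a placeholder: a NULL method oop destined for G5 and a jump target of -1, both overwritten when the call is first resolved. A hypothetical, simplified model of that patch-later idea; the struct layout and values are illustrative only.

#include <cstdint>
#include <cassert>

struct StaticCallStub {   // hypothetical layout for illustration
  intptr_t method_oop;    // set_oop(NULL, G5) placeholder
  intptr_t entry_point;   // jump target, -1 until patched
};

int main() {
  StaticCallStub stub{0, -1};      // as emitted at code-generation time
  assert(stub.entry_point == -1);  // unresolved marker
  stub.method_oop  = 0x1234;       // patched once the callee is resolved
  stub.entry_point = 0x5678;
  assert(stub.entry_point != -1);
}
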
duke@435 1598
duke@435 1599
duke@435 1600 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
duke@435 1601 if (opr1->is_single_fpu()) {
duke@435 1602 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg());
duke@435 1603 } else if (opr1->is_double_fpu()) {
duke@435 1604 __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg());
duke@435 1605 } else if (opr1->is_single_cpu()) {
duke@435 1606 if (opr2->is_constant()) {
duke@435 1607 switch (opr2->as_constant_ptr()->type()) {
duke@435 1608 case T_INT:
duke@435 1609 { jint con = opr2->as_constant_ptr()->as_jint();
duke@435 1610 if (Assembler::is_simm13(con)) {
duke@435 1611 __ cmp(opr1->as_register(), con);
duke@435 1612 } else {
duke@435 1613 __ set(con, O7);
duke@435 1614 __ cmp(opr1->as_register(), O7);
duke@435 1615 }
duke@435 1616 }
duke@435 1617 break;
duke@435 1618
duke@435 1619 case T_OBJECT:
duke@435 1620 // there are only equal/notequal comparisons on objects
duke@435 1621 { jobject con = opr2->as_constant_ptr()->as_jobject();
duke@435 1622 if (con == NULL) {
duke@435 1623 __ cmp(opr1->as_register(), 0);
duke@435 1624 } else {
duke@435 1625 jobject2reg(con, O7);
duke@435 1626 __ cmp(opr1->as_register(), O7);
duke@435 1627 }
duke@435 1628 }
duke@435 1629 break;
duke@435 1630
duke@435 1631 default:
duke@435 1632 ShouldNotReachHere();
duke@435 1633 break;
duke@435 1634 }
duke@435 1635 } else {
duke@435 1636 if (opr2->is_address()) {
duke@435 1637 LIR_Address * addr = opr2->as_address_ptr();
duke@435 1638 BasicType type = addr->type();
duke@435 1639 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
duke@435 1640 else __ ld(as_Address(addr), O7);
duke@435 1641 __ cmp(opr1->as_register(), O7);
duke@435 1642 } else {
duke@435 1643 __ cmp(opr1->as_register(), opr2->as_register());
duke@435 1644 }
duke@435 1645 }
duke@435 1646 } else if (opr1->is_double_cpu()) {
duke@435 1647 Register xlo = opr1->as_register_lo();
duke@435 1648 Register xhi = opr1->as_register_hi();
duke@435 1649 if (opr2->is_constant() && opr2->as_jlong() == 0) {
duke@435 1650 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
duke@435 1651 #ifdef _LP64
duke@435 1652 __ orcc(xhi, G0, G0);
duke@435 1653 #else
duke@435 1654 __ orcc(xhi, xlo, G0);
duke@435 1655 #endif
duke@435 1656 } else if (opr2->is_register()) {
duke@435 1657 Register ylo = opr2->as_register_lo();
duke@435 1658 Register yhi = opr2->as_register_hi();
duke@435 1659 #ifdef _LP64
duke@435 1660 __ cmp(xlo, ylo);
duke@435 1661 #else
duke@435 1662 __ subcc(xlo, ylo, xlo);
duke@435 1663 __ subccc(xhi, yhi, xhi);
duke@435 1664 if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
duke@435 1665 __ orcc(xhi, xlo, G0);
duke@435 1666 }
duke@435 1667 #endif
duke@435 1668 } else {
duke@435 1669 ShouldNotReachHere();
duke@435 1670 }
duke@435 1671 } else if (opr1->is_address()) {
duke@435 1672 LIR_Address * addr = opr1->as_address_ptr();
duke@435 1673 BasicType type = addr->type();
duke@435 1674 assert (opr2->is_constant(), "Checking");
duke@435 1675 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
duke@435 1676 else __ ld(as_Address(addr), O7);
duke@435 1677 __ cmp(O7, opr2->as_constant_ptr()->as_jint());
duke@435 1678 } else {
duke@435 1679 ShouldNotReachHere();
duke@435 1680 }
duke@435 1681 }
duke@435 1682
duke@435 1683
duke@435 1684 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
duke@435 1685 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
duke@435 1686 bool is_unordered_less = (code == lir_ucmp_fd2i);
duke@435 1687 if (left->is_single_fpu()) {
duke@435 1688 __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
duke@435 1689 } else if (left->is_double_fpu()) {
duke@435 1690 __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
duke@435 1691 } else {
duke@435 1692 ShouldNotReachHere();
duke@435 1693 }
duke@435 1694 } else if (code == lir_cmp_l2i) {
iveresov@1804 1695 #ifdef _LP64
iveresov@1804 1696 __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
iveresov@1804 1697 #else
duke@435 1698 __ lcmp(left->as_register_hi(), left->as_register_lo(),
duke@435 1699 right->as_register_hi(), right->as_register_lo(),
duke@435 1700 dst->as_register());
iveresov@1804 1701 #endif
duke@435 1702 } else {
duke@435 1703 ShouldNotReachHere();
duke@435 1704 }
duke@435 1705 }
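
`is_unordered_less` selects between Java's two float comparisons: fcmpl maps an unordered (NaN) result to -1, fcmpg to +1, which is exactly what the -1/1 argument to float_cmp encodes. A self-contained reference model:

#include <cmath>
#include <cassert>

static int fcmp(float a, float b, int unordered_result) {
  if (std::isnan(a) || std::isnan(b)) return unordered_result;
  return a < b ? -1 : (a == b ? 0 : 1);
}

int main() {
  assert(fcmp(1.0f, 2.0f, -1) == -1);
  assert(fcmp(NAN, 0.0f, -1) == -1);  // fcmpl: NaN compares "less"
  assert(fcmp(NAN, 0.0f,  1) ==  1);  // fcmpg: NaN compares "greater"
}
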
duke@435 1706
duke@435 1707
iveresov@2412 1708 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
duke@435 1709 Assembler::Condition acond;
duke@435 1710 switch (condition) {
duke@435 1711 case lir_cond_equal: acond = Assembler::equal; break;
duke@435 1712 case lir_cond_notEqual: acond = Assembler::notEqual; break;
duke@435 1713 case lir_cond_less: acond = Assembler::less; break;
duke@435 1714 case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
duke@435 1715 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
duke@435 1716 case lir_cond_greater: acond = Assembler::greater; break;
duke@435 1717 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break;
duke@435 1718 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break;
duke@435 1719 default: ShouldNotReachHere();
duke@435 1720 };
duke@435 1721
duke@435 1722 if (opr1->is_constant() && opr1->type() == T_INT) {
duke@435 1723 Register dest = result->as_register();
duke@435 1724 // load up first part of constant before branch
duke@435 1725 // and do the rest in the delay slot.
duke@435 1726 if (!Assembler::is_simm13(opr1->as_jint())) {
duke@435 1727 __ sethi(opr1->as_jint(), dest);
duke@435 1728 }
duke@435 1729 } else if (opr1->is_constant()) {
duke@435 1730 const2reg(opr1, result, lir_patch_none, NULL);
duke@435 1731 } else if (opr1->is_register()) {
duke@435 1732 reg2reg(opr1, result);
duke@435 1733 } else if (opr1->is_stack()) {
duke@435 1734 stack2reg(opr1, result, result->type());
duke@435 1735 } else {
duke@435 1736 ShouldNotReachHere();
duke@435 1737 }
duke@435 1738 Label skip;
iveresov@2412 1739 #ifdef _LP64
iveresov@2412 1740 if (type == T_INT) {
iveresov@2412 1741 __ br(acond, false, Assembler::pt, skip);
iveresov@2412 1742 } else
iveresov@2412 1743 #endif
iveresov@2412 1744 __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
duke@435 1745 if (opr1->is_constant() && opr1->type() == T_INT) {
duke@435 1746 Register dest = result->as_register();
duke@435 1747 if (Assembler::is_simm13(opr1->as_jint())) {
duke@435 1748 __ delayed()->or3(G0, opr1->as_jint(), dest);
duke@435 1749 } else {
duke@435 1750 // the sethi has been done above, so just put in the low 10 bits
duke@435 1751 __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest);
duke@435 1752 }
duke@435 1753 } else {
duke@435 1754 // can't do anything useful in the delay slot
duke@435 1755 __ delayed()->nop();
duke@435 1756 }
duke@435 1757 if (opr2->is_constant()) {
duke@435 1758 const2reg(opr2, result, lir_patch_none, NULL);
duke@435 1759 } else if (opr2->is_register()) {
duke@435 1760 reg2reg(opr2, result);
duke@435 1761 } else if (opr2->is_stack()) {
duke@435 1762 stack2reg(opr2, result, result->type());
duke@435 1763 } else {
duke@435 1764 ShouldNotReachHere();
duke@435 1765 }
duke@435 1766 __ bind(skip);
duke@435 1767 }
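
The constant trick in cmove splits a 32-bit immediate across the branch: sethi fills the upper 22 bits before the branch, and the delay-slot or3 supplies the low 10. The masking arithmetic, checked standalone:

#include <cstdint>
#include <cassert>

int main() {
  uint32_t con  = 0xDEADBEEF;
  uint32_t hi22 = con & ~0x3FFu;            // what sethi leaves in dest
  uint32_t full = hi22 | (con & 0x3FF);     // what or3(dest, con & 0x3ff, dest) adds
  assert(full == con);
}
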
duke@435 1768
duke@435 1769
duke@435 1770 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
duke@435 1771 assert(info == NULL, "unused on this code path");
duke@435 1772 assert(left->is_register(), "wrong items state");
duke@435 1773 assert(dest->is_register(), "wrong items state");
duke@435 1774
duke@435 1775 if (right->is_register()) {
duke@435 1776 if (dest->is_float_kind()) {
duke@435 1777
duke@435 1778 FloatRegister lreg, rreg, res;
duke@435 1779 FloatRegisterImpl::Width w;
duke@435 1780 if (right->is_single_fpu()) {
duke@435 1781 w = FloatRegisterImpl::S;
duke@435 1782 lreg = left->as_float_reg();
duke@435 1783 rreg = right->as_float_reg();
duke@435 1784 res = dest->as_float_reg();
duke@435 1785 } else {
duke@435 1786 w = FloatRegisterImpl::D;
duke@435 1787 lreg = left->as_double_reg();
duke@435 1788 rreg = right->as_double_reg();
duke@435 1789 res = dest->as_double_reg();
duke@435 1790 }
duke@435 1791
duke@435 1792 switch (code) {
duke@435 1793 case lir_add: __ fadd(w, lreg, rreg, res); break;
duke@435 1794 case lir_sub: __ fsub(w, lreg, rreg, res); break;
duke@435 1795 case lir_mul: // fall through
duke@435 1796 case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break;
duke@435 1797 case lir_div: // fall through
duke@435 1798 case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break;
duke@435 1799 default: ShouldNotReachHere();
duke@435 1800 }
duke@435 1801
duke@435 1802 } else if (dest->is_double_cpu()) {
duke@435 1803 #ifdef _LP64
duke@435 1804 Register dst_lo = dest->as_register_lo();
duke@435 1805 Register op1_lo = left->as_pointer_register();
duke@435 1806 Register op2_lo = right->as_pointer_register();
duke@435 1807
duke@435 1808 switch (code) {
duke@435 1809 case lir_add:
duke@435 1810 __ add(op1_lo, op2_lo, dst_lo);
duke@435 1811 break;
duke@435 1812
duke@435 1813 case lir_sub:
duke@435 1814 __ sub(op1_lo, op2_lo, dst_lo);
duke@435 1815 break;
duke@435 1816
duke@435 1817 default: ShouldNotReachHere();
duke@435 1818 }
duke@435 1819 #else
duke@435 1820 Register op1_lo = left->as_register_lo();
duke@435 1821 Register op1_hi = left->as_register_hi();
duke@435 1822 Register op2_lo = right->as_register_lo();
duke@435 1823 Register op2_hi = right->as_register_hi();
duke@435 1824 Register dst_lo = dest->as_register_lo();
duke@435 1825 Register dst_hi = dest->as_register_hi();
duke@435 1826
duke@435 1827 switch (code) {
duke@435 1828 case lir_add:
duke@435 1829 __ addcc(op1_lo, op2_lo, dst_lo);
duke@435 1830 __ addc (op1_hi, op2_hi, dst_hi);
duke@435 1831 break;
duke@435 1832
duke@435 1833 case lir_sub:
duke@435 1834 __ subcc(op1_lo, op2_lo, dst_lo);
duke@435 1835 __ subc (op1_hi, op2_hi, dst_hi);
duke@435 1836 break;
duke@435 1837
duke@435 1838 default: ShouldNotReachHere();
duke@435 1839 }
duke@435 1840 #endif
duke@435 1841 } else {
duke@435 1842 assert (right->is_single_cpu(), "Just Checking");
duke@435 1843
duke@435 1844 Register lreg = left->as_register();
duke@435 1845 Register res = dest->as_register();
duke@435 1846 Register rreg = right->as_register();
duke@435 1847 switch (code) {
duke@435 1848 case lir_add: __ add (lreg, rreg, res); break;
duke@435 1849 case lir_sub: __ sub (lreg, rreg, res); break;
duke@435 1850 case lir_mul: __ mult (lreg, rreg, res); break;
duke@435 1851 default: ShouldNotReachHere();
duke@435 1852 }
duke@435 1853 }
duke@435 1854 } else {
duke@435 1855 assert (right->is_constant(), "must be constant");
duke@435 1856
duke@435 1857 if (dest->is_single_cpu()) {
duke@435 1858 Register lreg = left->as_register();
duke@435 1859 Register res = dest->as_register();
duke@435 1860 int simm13 = right->as_constant_ptr()->as_jint();
duke@435 1861
duke@435 1862 switch (code) {
duke@435 1863 case lir_add: __ add (lreg, simm13, res); break;
duke@435 1864 case lir_sub: __ sub (lreg, simm13, res); break;
duke@435 1865 case lir_mul: __ mult (lreg, simm13, res); break;
duke@435 1866 default: ShouldNotReachHere();
duke@435 1867 }
duke@435 1868 } else {
duke@435 1869 Register lreg = left->as_pointer_register();
duke@435 1870 Register res = dest->as_register_lo();
duke@435 1871 long con = right->as_constant_ptr()->as_jlong();
duke@435 1872 assert(Assembler::is_simm13(con), "must be simm13");
duke@435 1873
duke@435 1874 switch (code) {
duke@435 1875 case lir_add: __ add (lreg, (int)con, res); break;
duke@435 1876 case lir_sub: __ sub (lreg, (int)con, res); break;
duke@435 1877 case lir_mul: __ mult (lreg, (int)con, res); break;
duke@435 1878 default: ShouldNotReachHere();
duke@435 1879 }
duke@435 1880 }
duke@435 1881 }
duke@435 1882 }
duke@435 1883
duke@435 1884
duke@435 1885 void LIR_Assembler::fpop() {
duke@435 1886 // do nothing
duke@435 1887 }
duke@435 1888
duke@435 1889
duke@435 1890 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
duke@435 1891 switch (code) {
duke@435 1892 case lir_sin:
duke@435 1893 case lir_tan:
duke@435 1894 case lir_cos: {
duke@435 1895 assert(thread->is_valid(), "preserve the thread object for performance reasons");
duke@435 1896 assert(dest->as_double_reg() == F0, "the result will be in f0/f1");
duke@435 1897 break;
duke@435 1898 }
duke@435 1899 case lir_sqrt: {
duke@435 1900 assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
duke@435 1901 FloatRegister src_reg = value->as_double_reg();
duke@435 1902 FloatRegister dst_reg = dest->as_double_reg();
duke@435 1903 __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg);
duke@435 1904 break;
duke@435 1905 }
duke@435 1906 case lir_abs: {
duke@435 1907 assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
duke@435 1908 FloatRegister src_reg = value->as_double_reg();
duke@435 1909 FloatRegister dst_reg = dest->as_double_reg();
duke@435 1910 __ fabs(FloatRegisterImpl::D, src_reg, dst_reg);
duke@435 1911 break;
duke@435 1912 }
duke@435 1913 default: {
duke@435 1914 ShouldNotReachHere();
duke@435 1915 break;
duke@435 1916 }
duke@435 1917 }
duke@435 1918 }
duke@435 1919
duke@435 1920
duke@435 1921 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
duke@435 1922 if (right->is_constant()) {
duke@435 1923 if (dest->is_single_cpu()) {
duke@435 1924 int simm13 = right->as_constant_ptr()->as_jint();
duke@435 1925 switch (code) {
duke@435 1926 case lir_logic_and: __ and3 (left->as_register(), simm13, dest->as_register()); break;
duke@435 1927 case lir_logic_or: __ or3 (left->as_register(), simm13, dest->as_register()); break;
duke@435 1928 case lir_logic_xor: __ xor3 (left->as_register(), simm13, dest->as_register()); break;
duke@435 1929 default: ShouldNotReachHere();
duke@435 1930 }
duke@435 1931 } else {
duke@435 1932 long c = right->as_constant_ptr()->as_jlong();
duke@435 1933 assert(c == (int)c && Assembler::is_simm13(c), "out of range");
duke@435 1934 int simm13 = (int)c;
duke@435 1935 switch (code) {
duke@435 1936 case lir_logic_and:
duke@435 1937 #ifndef _LP64
duke@435 1938 __ and3 (left->as_register_hi(), 0, dest->as_register_hi());
duke@435 1939 #endif
duke@435 1940 __ and3 (left->as_register_lo(), simm13, dest->as_register_lo());
duke@435 1941 break;
duke@435 1942
duke@435 1943 case lir_logic_or:
duke@435 1944 #ifndef _LP64
duke@435 1945 __ or3 (left->as_register_hi(), 0, dest->as_register_hi());
duke@435 1946 #endif
duke@435 1947 __ or3 (left->as_register_lo(), simm13, dest->as_register_lo());
duke@435 1948 break;
duke@435 1949
duke@435 1950 case lir_logic_xor:
duke@435 1951 #ifndef _LP64
duke@435 1952 __ xor3 (left->as_register_hi(), 0, dest->as_register_hi());
duke@435 1953 #endif
duke@435 1954 __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo());
duke@435 1955 break;
duke@435 1956
duke@435 1957 default: ShouldNotReachHere();
duke@435 1958 }
duke@435 1959 }
duke@435 1960 } else {
duke@435 1961 assert(right->is_register(), "right should be in register");
duke@435 1962
duke@435 1963 if (dest->is_single_cpu()) {
duke@435 1964 switch (code) {
duke@435 1965 case lir_logic_and: __ and3 (left->as_register(), right->as_register(), dest->as_register()); break;
duke@435 1966 case lir_logic_or: __ or3 (left->as_register(), right->as_register(), dest->as_register()); break;
duke@435 1967 case lir_logic_xor: __ xor3 (left->as_register(), right->as_register(), dest->as_register()); break;
duke@435 1968 default: ShouldNotReachHere();
duke@435 1969 }
duke@435 1970 } else {
duke@435 1971 #ifdef _LP64
duke@435 1972 Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
duke@435 1973 left->as_register_lo();
duke@435 1974 Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
duke@435 1975 right->as_register_lo();
duke@435 1976
duke@435 1977 switch (code) {
duke@435 1978 case lir_logic_and: __ and3 (l, r, dest->as_register_lo()); break;
duke@435 1979 case lir_logic_or: __ or3 (l, r, dest->as_register_lo()); break;
duke@435 1980 case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break;
duke@435 1981 default: ShouldNotReachHere();
duke@435 1982 }
duke@435 1983 #else
duke@435 1984 switch (code) {
duke@435 1985 case lir_logic_and:
duke@435 1986 __ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
duke@435 1987 __ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
duke@435 1988 break;
duke@435 1989
duke@435 1990 case lir_logic_or:
duke@435 1991 __ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
duke@435 1992 __ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
duke@435 1993 break;
duke@435 1994
duke@435 1995 case lir_logic_xor:
duke@435 1996 __ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
duke@435 1997 __ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
duke@435 1998 break;
duke@435 1999
duke@435 2000 default: ShouldNotReachHere();
duke@435 2001 }
duke@435 2002 #endif
duke@435 2003 }
duke@435 2004 }
duke@435 2005 }
duke@435 2006
duke@435 2007
duke@435 2008 int LIR_Assembler::shift_amount(BasicType t) {
kvn@464 2009 int elem_size = type2aelembytes(t);
duke@435 2010 switch (elem_size) {
duke@435 2011 case 1 : return 0;
duke@435 2012 case 2 : return 1;
duke@435 2013 case 4 : return 2;
duke@435 2014 case 8 : return 3;
duke@435 2015 }
duke@435 2016 ShouldNotReachHere();
duke@435 2017 return -1;
duke@435 2018 }
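
shift_amount returns log2 of the element size so that an array index can be scaled with a shift instead of a multiply. A tiny standalone check:

#include <cassert>

static int shift_amount(int elem_size) {
  switch (elem_size) {
    case 1: return 0;
    case 2: return 1;
    case 4: return 2;
    case 8: return 3;
  }
  return -1;  // stands in for ShouldNotReachHere()
}

int main() {
  // element byte offset = index << shift_amount(size), e.g. a jint array:
  assert((7 << shift_amount(4)) == 7 * 4);
}
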
duke@435 2019
duke@435 2020
never@1813 2021 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
duke@435 2022 assert(exceptionOop->as_register() == Oexception, "should match");
never@1813 2023 assert(exceptionPC->as_register() == Oissuing_pc, "should match");
duke@435 2024
duke@435 2025 info->add_register_oop(exceptionOop);
duke@435 2026
never@1813 2027 // reuse the debug info from the safepoint poll for the throw op itself
never@1813 2028 address pc_for_athrow = __ pc();
never@1813 2029 int pc_for_athrow_offset = __ offset();
never@1813 2030 RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
never@1813 2031 __ set(pc_for_athrow, Oissuing_pc, rspec);
never@1813 2032 add_call_info(pc_for_athrow_offset, info); // for exception handler
never@1813 2033
never@1813 2034 __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
never@1813 2035 __ delayed()->nop();
never@1813 2036 }
never@1813 2037
never@1813 2038
never@1813 2039 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
never@1813 2040 assert(exceptionOop->as_register() == Oexception, "should match");
never@1813 2041
never@1813 2042 __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry);
never@1813 2043 __ delayed()->nop();
duke@435 2044 }
duke@435 2045
duke@435 2046
duke@435 2047 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
duke@435 2048 Register src = op->src()->as_register();
duke@435 2049 Register dst = op->dst()->as_register();
duke@435 2050 Register src_pos = op->src_pos()->as_register();
duke@435 2051 Register dst_pos = op->dst_pos()->as_register();
duke@435 2052 Register length = op->length()->as_register();
duke@435 2053 Register tmp = op->tmp()->as_register();
duke@435 2054 Register tmp2 = O7;
duke@435 2055
duke@435 2056 int flags = op->flags();
duke@435 2057 ciArrayKlass* default_type = op->expected_type();
duke@435 2058 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
duke@435 2059 if (basic_type == T_ARRAY) basic_type = T_OBJECT;
duke@435 2060
duke@435 2061 // set up the arraycopy stub information
duke@435 2062 ArrayCopyStub* stub = op->stub();
duke@435 2063
duke@435 2064 // Always use the stub if no type information is available. It's OK if
duke@435 2065 // the known type isn't loaded, since the code sanity-checks in debug
duke@435 2066 // mode, and the type isn't required when we know the exact type.
duke@435 2067 // Also check that the type is an array type.
ysr@777 2068 // We also, for now, always call the stub if the barrier set requires a
ysr@777 2069 // write_ref_pre barrier (which the stub does, but none of the optimized
ysr@777 2070 // cases currently does).
ysr@777 2071 if (op->expected_type() == NULL ||
ysr@777 2072 Universe::heap()->barrier_set()->has_write_ref_pre_barrier()) {
duke@435 2073 __ mov(src, O0);
duke@435 2074 __ mov(src_pos, O1);
duke@435 2075 __ mov(dst, O2);
duke@435 2076 __ mov(dst_pos, O3);
duke@435 2077 __ mov(length, O4);
duke@435 2078 __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy));
duke@435 2079
duke@435 2080 __ br_zero(Assembler::less, false, Assembler::pn, O0, *stub->entry());
duke@435 2081 __ delayed()->nop();
duke@435 2082 __ bind(*stub->continuation());
duke@435 2083 return;
duke@435 2084 }
duke@435 2085
duke@435 2086 assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
duke@435 2087
duke@435 2088 // make sure src and dst are non-null and load array length
duke@435 2089 if (flags & LIR_OpArrayCopy::src_null_check) {
duke@435 2090 __ tst(src);
iveresov@2344 2091 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
duke@435 2092 __ delayed()->nop();
duke@435 2093 }
duke@435 2094
duke@435 2095 if (flags & LIR_OpArrayCopy::dst_null_check) {
duke@435 2096 __ tst(dst);
iveresov@2344 2097 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
duke@435 2098 __ delayed()->nop();
duke@435 2099 }
duke@435 2100
duke@435 2101 if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
duke@435 2102 // test src_pos register
duke@435 2103 __ tst(src_pos);
duke@435 2104 __ br(Assembler::less, false, Assembler::pn, *stub->entry());
duke@435 2105 __ delayed()->nop();
duke@435 2106 }
duke@435 2107
duke@435 2108 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
duke@435 2109 // test dst_pos register
duke@435 2110 __ tst(dst_pos);
duke@435 2111 __ br(Assembler::less, false, Assembler::pn, *stub->entry());
duke@435 2112 __ delayed()->nop();
duke@435 2113 }
duke@435 2114
duke@435 2115 if (flags & LIR_OpArrayCopy::length_positive_check) {
duke@435 2116 // make sure length isn't negative
duke@435 2117 __ tst(length);
duke@435 2118 __ br(Assembler::less, false, Assembler::pn, *stub->entry());
duke@435 2119 __ delayed()->nop();
duke@435 2120 }
duke@435 2121
duke@435 2122 if (flags & LIR_OpArrayCopy::src_range_check) {
duke@435 2123 __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2);
duke@435 2124 __ add(length, src_pos, tmp);
duke@435 2125 __ cmp(tmp2, tmp);
duke@435 2126 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
duke@435 2127 __ delayed()->nop();
duke@435 2128 }
duke@435 2129
duke@435 2130 if (flags & LIR_OpArrayCopy::dst_range_check) {
duke@435 2131 __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2);
duke@435 2132 __ add(length, dst_pos, tmp);
duke@435 2133 __ cmp(tmp2, tmp);
duke@435 2134 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
duke@435 2135 __ delayed()->nop();
duke@435 2136 }
duke@435 2137
duke@435 2138 if (flags & LIR_OpArrayCopy::type_check) {
iveresov@2344 2139 if (UseCompressedOops) {
iveresov@2344 2140 // No need to decode the klasses; comparing the compressed values suffices
iveresov@2344 2141 __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
iveresov@2344 2142 __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
iveresov@2344 2143 __ cmp(tmp, tmp2);
iveresov@2344 2144 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
iveresov@2344 2145 } else {
iveresov@2344 2146 __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
iveresov@2344 2147 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
iveresov@2344 2148 __ cmp(tmp, tmp2);
iveresov@2344 2149 __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
iveresov@2344 2150 }
duke@435 2151 __ delayed()->nop();
duke@435 2152 }
duke@435 2153
duke@435 2154 #ifdef ASSERT
duke@435 2155 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
duke@435 2156 // Sanity check the known type with the incoming class. For the
duke@435 2157 // primitive case the types must match exactly with src.klass and
duke@435 2158 // dst.klass each exactly matching the default type. For the
duke@435 2159 // object array case, if no type check is needed then either the
duke@435 2160 // dst type is exactly the expected type and the src type is a
duke@435 2161 // subtype which we can't check or src is the same array as dst
duke@435 2162 // but not necessarily exactly of type default_type.
duke@435 2163 Label known_ok, halt;
jrose@1424 2164 jobject2reg(op->expected_type()->constant_encoding(), tmp);
iveresov@2344 2165 if (UseCompressedOops) {
iveresov@2344 2166 // tmp holds the default type. It currently comes uncompressed after the
iveresov@2344 2167 // load of a constant, so encode it.
iveresov@2344 2168 __ encode_heap_oop(tmp);
iveresov@2344 2169 // load the raw value of the dst klass, since we will be comparing
iveresov@2344 2170 // uncompressed values directly.
iveresov@2344 2171 __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
iveresov@2344 2172 if (basic_type != T_OBJECT) {
iveresov@2344 2173 __ cmp(tmp, tmp2);
iveresov@2344 2174 __ br(Assembler::notEqual, false, Assembler::pn, halt);
iveresov@2344 2175 // load the raw value of the src klass.
iveresov@2344 2176 __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
iveresov@2344 2177 __ cmp(tmp, tmp2);
iveresov@2344 2178 __ br(Assembler::equal, false, Assembler::pn, known_ok);
iveresov@2344 2179 __ delayed()->nop();
iveresov@2344 2180 } else {
iveresov@2344 2181 __ cmp(tmp, tmp2);
iveresov@2344 2182 __ br(Assembler::equal, false, Assembler::pn, known_ok);
iveresov@2344 2183 __ delayed()->cmp(src, dst);
iveresov@2344 2184 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
iveresov@2344 2185 __ delayed()->nop();
iveresov@2344 2186 }
duke@435 2187 } else {
iveresov@2344 2188 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
iveresov@2344 2189 if (basic_type != T_OBJECT) {
iveresov@2344 2190 __ cmp(tmp, tmp2);
iveresov@2344 2191 __ brx(Assembler::notEqual, false, Assembler::pn, halt);
iveresov@2344 2192 __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
iveresov@2344 2193 __ cmp(tmp, tmp2);
iveresov@2344 2194 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
iveresov@2344 2195 __ delayed()->nop();
iveresov@2344 2196 } else {
iveresov@2344 2197 __ cmp(tmp, tmp2);
iveresov@2344 2198 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
iveresov@2344 2199 __ delayed()->cmp(src, dst);
iveresov@2344 2200 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
iveresov@2344 2201 __ delayed()->nop();
iveresov@2344 2202 }
duke@435 2203 }
duke@435 2204 __ bind(halt);
duke@435 2205 __ stop("incorrect type information in arraycopy");
duke@435 2206 __ bind(known_ok);
duke@435 2207 }
duke@435 2208 #endif
duke@435 2209
duke@435 2210 int shift = shift_amount(basic_type);
duke@435 2211
duke@435 2212 Register src_ptr = O0;
duke@435 2213 Register dst_ptr = O1;
duke@435 2214 Register len = O2;
duke@435 2215
duke@435 2216 __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
roland@1495 2217 LP64_ONLY(__ sra(src_pos, 0, src_pos);) // higher 32 bits must be zero
duke@435 2218 if (shift == 0) {
duke@435 2219 __ add(src_ptr, src_pos, src_ptr);
duke@435 2220 } else {
duke@435 2221 __ sll(src_pos, shift, tmp);
duke@435 2222 __ add(src_ptr, tmp, src_ptr);
duke@435 2223 }
duke@435 2224
duke@435 2225 __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
roland@1495 2226 LP64_ONLY(__ sra(dst_pos, 0, dst_pos);) // higher 32 bits must be zero
duke@435 2227 if (shift == 0) {
duke@435 2228 __ add(dst_ptr, dst_pos, dst_ptr);
duke@435 2229 } else {
duke@435 2230 __ sll(dst_pos, shift, tmp);
duke@435 2231 __ add(dst_ptr, tmp, dst_ptr);
duke@435 2232 }
duke@435 2233
duke@435 2234 if (basic_type != T_OBJECT) {
duke@435 2235 if (shift == 0) {
duke@435 2236 __ mov(length, len);
duke@435 2237 } else {
duke@435 2238 __ sll(length, shift, len);
duke@435 2239 }
duke@435 2240 __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy));
duke@435 2241 } else {
duke@435 2242 // oop_arraycopy takes a length in number of elements, so don't scale it.
duke@435 2243 __ mov(length, len);
duke@435 2244 __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy));
duke@435 2245 }
duke@435 2246
duke@435 2247 __ bind(*stub->continuation());
duke@435 2248 }
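
The fast-path address setup above computes element pointers as array base plus header plus a scaled position. The same arithmetic in isolation; the header size here is an illustrative assumption, the real value comes from arrayOopDesc::base_offset_in_bytes.

#include <cstdint>
#include <cassert>

int main() {
  const intptr_t array_oop = 0x10000;  // hypothetical array address
  const int base_offset    = 16;       // header size, illustrative only
  const int shift          = 2;        // shift_amount for T_INT elements
  const int src_pos        = 5;
  intptr_t src_ptr = array_oop + base_offset + (intptr_t(src_pos) << shift);
  assert(src_ptr == array_oop + 16 + 20);
}
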
duke@435 2249
duke@435 2250
duke@435 2251 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
duke@435 2252 if (dest->is_single_cpu()) {
duke@435 2253 #ifdef _LP64
duke@435 2254 if (left->type() == T_OBJECT) {
duke@435 2255 switch (code) {
duke@435 2256 case lir_shl: __ sllx (left->as_register(), count->as_register(), dest->as_register()); break;
duke@435 2257 case lir_shr: __ srax (left->as_register(), count->as_register(), dest->as_register()); break;
duke@435 2258 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
duke@435 2259 default: ShouldNotReachHere();
duke@435 2260 }
duke@435 2261 } else
duke@435 2262 #endif
duke@435 2263 switch (code) {
duke@435 2264 case lir_shl: __ sll (left->as_register(), count->as_register(), dest->as_register()); break;
duke@435 2265 case lir_shr: __ sra (left->as_register(), count->as_register(), dest->as_register()); break;
duke@435 2266 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
duke@435 2267 default: ShouldNotReachHere();
duke@435 2268 }
duke@435 2269 } else {
duke@435 2270 #ifdef _LP64
duke@435 2271 switch (code) {
duke@435 2272 case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
duke@435 2273 case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
duke@435 2274 case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
duke@435 2275 default: ShouldNotReachHere();
duke@435 2276 }
duke@435 2277 #else
duke@435 2278 switch (code) {
duke@435 2279 case lir_shl: __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
duke@435 2280 case lir_shr: __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
duke@435 2281 case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
duke@435 2282 default: ShouldNotReachHere();
duke@435 2283 }
duke@435 2284 #endif
duke@435 2285 }
duke@435 2286 }
duke@435 2287
duke@435 2288
duke@435 2289 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
duke@435 2290 #ifdef _LP64
duke@435 2291 if (left->type() == T_OBJECT) {
duke@435 2292 count = count & 63; // shouldn't shift by more than the bit width of intptr_t
duke@435 2293 Register l = left->as_register();
duke@435 2294 Register d = dest->as_register_lo();
duke@435 2295 switch (code) {
duke@435 2296 case lir_shl: __ sllx (l, count, d); break;
duke@435 2297 case lir_shr: __ srax (l, count, d); break;
duke@435 2298 case lir_ushr: __ srlx (l, count, d); break;
duke@435 2299 default: ShouldNotReachHere();
duke@435 2300 }
duke@435 2301 return;
duke@435 2302 }
duke@435 2303 #endif
duke@435 2304
duke@435 2305 if (dest->is_single_cpu()) {
duke@435 2306 count = count & 0x1F; // Java spec
duke@435 2307 switch (code) {
duke@435 2308 case lir_shl: __ sll (left->as_register(), count, dest->as_register()); break;
duke@435 2309 case lir_shr: __ sra (left->as_register(), count, dest->as_register()); break;
duke@435 2310 case lir_ushr: __ srl (left->as_register(), count, dest->as_register()); break;
duke@435 2311 default: ShouldNotReachHere();
duke@435 2312 }
duke@435 2313 } else if (dest->is_double_cpu()) {
duke@435 2314 count = count & 63; // Java spec
duke@435 2315 switch (code) {
duke@435 2316 case lir_shl: __ sllx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
duke@435 2317 case lir_shr: __ srax (left->as_pointer_register(), count, dest->as_pointer_register()); break;
duke@435 2318 case lir_ushr: __ srlx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
duke@435 2319 default: ShouldNotReachHere();
duke@435 2320 }
duke@435 2321 } else {
duke@435 2322 ShouldNotReachHere();
duke@435 2323 }
duke@435 2324 }
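
The `& 0x1F` / `& 63` masks mirror Java shift semantics, where only the low 5 bits (int) or 6 bits (long) of the count are used. Checked standalone:

#include <cassert>

int main() {
  int count = 33;
  assert((count & 0x1F) == 1);       // int shift by 33 behaves like shift by 1
  assert((count & 63)   == 33);      // long shift keeps all 6 bits
  assert((1 << (33 & 0x1F)) == 2);
}
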
duke@435 2325
duke@435 2326
duke@435 2327 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
duke@435 2328 assert(op->tmp1()->as_register() == G1 &&
duke@435 2329 op->tmp2()->as_register() == G3 &&
duke@435 2330 op->tmp3()->as_register() == G4 &&
duke@435 2331 op->obj()->as_register() == O0 &&
duke@435 2332 op->klass()->as_register() == G5, "must be");
duke@435 2333 if (op->init_check()) {
duke@435 2334 __ ld(op->klass()->as_register(),
duke@435 2335 instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc),
duke@435 2336 op->tmp1()->as_register());
duke@435 2337 add_debug_info_for_null_check_here(op->stub()->info());
duke@435 2338 __ cmp(op->tmp1()->as_register(), instanceKlass::fully_initialized);
duke@435 2339 __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
duke@435 2340 __ delayed()->nop();
duke@435 2341 }
duke@435 2342 __ allocate_object(op->obj()->as_register(),
duke@435 2343 op->tmp1()->as_register(),
duke@435 2344 op->tmp2()->as_register(),
duke@435 2345 op->tmp3()->as_register(),
duke@435 2346 op->header_size(),
duke@435 2347 op->object_size(),
duke@435 2348 op->klass()->as_register(),
duke@435 2349 *op->stub()->entry());
duke@435 2350 __ bind(*op->stub()->continuation());
duke@435 2351 __ verify_oop(op->obj()->as_register());
duke@435 2352 }
duke@435 2353
duke@435 2354
duke@435 2355 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
duke@435 2356 assert(op->tmp1()->as_register() == G1 &&
duke@435 2357 op->tmp2()->as_register() == G3 &&
duke@435 2358 op->tmp3()->as_register() == G4 &&
duke@435 2359 op->tmp4()->as_register() == O1 &&
duke@435 2360 op->klass()->as_register() == G5, "must be");
iveresov@2432 2361
iveresov@2432 2362 LP64_ONLY( __ signx(op->len()->as_register()); )
duke@435 2363 if (UseSlowPath ||
duke@435 2364 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
duke@435 2365 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
never@1813 2366 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
duke@435 2367 __ delayed()->nop();
duke@435 2368 } else {
duke@435 2369 __ allocate_array(op->obj()->as_register(),
duke@435 2370 op->len()->as_register(),
duke@435 2371 op->tmp1()->as_register(),
duke@435 2372 op->tmp2()->as_register(),
duke@435 2373 op->tmp3()->as_register(),
duke@435 2374 arrayOopDesc::header_size(op->type()),
kvn@464 2375 type2aelembytes(op->type()),
duke@435 2376 op->klass()->as_register(),
duke@435 2377 *op->stub()->entry());
duke@435 2378 }
duke@435 2379 __ bind(*op->stub()->continuation());
duke@435 2380 }
duke@435 2381
duke@435 2382
iveresov@2138 2383 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
iveresov@2138 2384 ciMethodData *md, ciProfileData *data,
iveresov@2138 2385 Register recv, Register tmp1, Label* update_done) {
iveresov@2138 2386 uint i;
iveresov@2138 2387 for (i = 0; i < VirtualCallData::row_limit(); i++) {
iveresov@2138 2388 Label next_test;
iveresov@2138 2389 // See if the receiver is receiver[n].
iveresov@2138 2390 Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
iveresov@2138 2391 mdo_offset_bias);
iveresov@2138 2392 __ ld_ptr(receiver_addr, tmp1);
iveresov@2138 2393 __ verify_oop(tmp1);
iveresov@2138 2394 __ cmp(recv, tmp1);
iveresov@2138 2395 __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
iveresov@2138 2396 __ delayed()->nop();
iveresov@2138 2397 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
iveresov@2138 2398 mdo_offset_bias);
iveresov@2138 2399 __ ld_ptr(data_addr, tmp1);
iveresov@2138 2400 __ add(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2138 2401 __ st_ptr(tmp1, data_addr);
iveresov@2138 2402 __ ba(false, *update_done);
iveresov@2138 2403 __ delayed()->nop();
iveresov@2138 2404 __ bind(next_test);
iveresov@2138 2405 }
iveresov@2138 2406
iveresov@2138 2407 // Didn't find receiver; find next empty slot and fill it in
iveresov@2138 2408 for (i = 0; i < VirtualCallData::row_limit(); i++) {
iveresov@2138 2409 Label next_test;
iveresov@2138 2410 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
iveresov@2138 2411 mdo_offset_bias);
iveresov@2344 2412 __ ld_ptr(recv_addr, tmp1);
iveresov@2138 2413 __ br_notnull(tmp1, false, Assembler::pt, next_test);
iveresov@2138 2414 __ delayed()->nop();
iveresov@2138 2415 __ st_ptr(recv, recv_addr);
iveresov@2138 2416 __ set(DataLayout::counter_increment, tmp1);
iveresov@2138 2417 __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
iveresov@2138 2418 mdo_offset_bias);
iveresov@2138 2419 __ ba(false, *update_done);
iveresov@2138 2420 __ delayed()->nop();
iveresov@2138 2421 __ bind(next_test);
iveresov@2138 2422 }
iveresov@2138 2423 }
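
A self-contained model of the two loops above: bump the counter of a matching receiver row, otherwise claim the first empty row; if every row is taken, fall through without recording (callers then update a generic counter). The row layout and limit are simplified assumptions standing in for ReceiverTypeData slots and VirtualCallData::row_limit().

#include <cstdint>
#include <cassert>

struct Row { intptr_t receiver; intptr_t count; };  // simplified profile row

static void profile_receiver(Row* rows, int limit, intptr_t recv) {
  for (int i = 0; i < limit; i++)                    // first loop: known receivers
    if (rows[i].receiver == recv) { rows[i].count++; return; }
  for (int i = 0; i < limit; i++)                    // second loop: claim empty slot
    if (rows[i].receiver == 0) { rows[i].receiver = recv; rows[i].count = 1; return; }
  // all rows taken: fall through without recording
}

int main() {
  Row rows[2] = {};
  profile_receiver(rows, 2, 0xA);
  profile_receiver(rows, 2, 0xA);
  profile_receiver(rows, 2, 0xB);
  assert(rows[0].count == 2 && rows[1].count == 1);
}
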
iveresov@2138 2424
iveresov@2146 2425
iveresov@2146 2426 void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
iveresov@2146 2427 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
iveresov@2349 2428 md = method->method_data_or_null();
iveresov@2349 2429 assert(md != NULL, "Sanity");
iveresov@2146 2430 data = md->bci_to_data(bci);
iveresov@2146 2431 assert(data != NULL, "need data for checkcast");
iveresov@2146 2432 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
iveresov@2146 2433 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
iveresov@2146 2434 // The offset is large so bias the mdo by the base of the slot so
iveresov@2146 2435 // that the ld can use simm13s to reference the slots of the data
iveresov@2146 2436 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
iveresov@2146 2437 }
iveresov@2146 2438 }
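
The bias exists purely so later ld/st displacements stay within simm13 range: when a profile slot is too far from the mdo base, the base is advanced once by the slot offset and each field is then addressed relative to that bias. A minimal sketch of the arithmetic:

#include <cassert>

static bool is_simm13(long x) { return -4096 <= x && x <= 4095; }

int main() {
  long slot_offset = 20000;                         // too far for one displacement
  long bias = is_simm13(slot_offset) ? 0 : slot_offset;
  long disp = slot_offset - bias;                   // what the ld/st actually encodes
  assert(is_simm13(disp));
}
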
iveresov@2146 2439
iveresov@2146 2440 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
iveresov@2138 2441 // we always need a stub for the failure case.
iveresov@2138 2442 CodeStub* stub = op->stub();
iveresov@2138 2443 Register obj = op->object()->as_register();
iveresov@2138 2444 Register k_RInfo = op->tmp1()->as_register();
iveresov@2138 2445 Register klass_RInfo = op->tmp2()->as_register();
iveresov@2138 2446 Register dst = op->result_opr()->as_register();
iveresov@2138 2447 Register Rtmp1 = op->tmp3()->as_register();
iveresov@2138 2448 ciKlass* k = op->klass();
iveresov@2138 2449
iveresov@2138 2450
iveresov@2138 2451 if (obj == k_RInfo) {
iveresov@2138 2452 k_RInfo = klass_RInfo;
iveresov@2138 2453 klass_RInfo = obj;
iveresov@2138 2454 }
iveresov@2138 2455
iveresov@2138 2456 ciMethodData* md;
iveresov@2138 2457 ciProfileData* data;
iveresov@2138 2458 int mdo_offset_bias = 0;
iveresov@2138 2459 if (op->should_profile()) {
iveresov@2138 2460 ciMethod* method = op->profiled_method();
iveresov@2138 2461 assert(method != NULL, "Should have method");
iveresov@2146 2462 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
iveresov@2146 2463
iveresov@2146 2464 Label not_null;
iveresov@2146 2465 __ br_notnull(obj, false, Assembler::pn, not_null);
iveresov@2138 2466 __ delayed()->nop();
iveresov@2138 2467 Register mdo = k_RInfo;
iveresov@2138 2468 Register data_val = Rtmp1;
iveresov@2138 2469 jobject2reg(md->constant_encoding(), mdo);
iveresov@2138 2470 if (mdo_offset_bias > 0) {
iveresov@2138 2471 __ set(mdo_offset_bias, data_val);
iveresov@2138 2472 __ add(mdo, data_val, mdo);
iveresov@2138 2473 }
iveresov@2138 2474 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
iveresov@2138 2475 __ ldub(flags_addr, data_val);
iveresov@2138 2476 __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
iveresov@2138 2477 __ stb(data_val, flags_addr);
iveresov@2146 2478 __ ba(false, *obj_is_null);
iveresov@2146 2479 __ delayed()->nop();
iveresov@2146 2480 __ bind(not_null);
iveresov@2146 2481 } else {
iveresov@2146 2482 __ br_null(obj, false, Assembler::pn, *obj_is_null);
iveresov@2146 2483 __ delayed()->nop();
iveresov@2138 2484 }
iveresov@2146 2485
iveresov@2146 2486 Label profile_cast_failure, profile_cast_success;
iveresov@2146 2487 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
iveresov@2146 2488 Label *success_target = op->should_profile() ? &profile_cast_success : success;
iveresov@2138 2489
iveresov@2138 2490 // patching may screw with our temporaries on sparc,
iveresov@2138 2491 // so let's do it before loading the class
iveresov@2138 2492 if (k->is_loaded()) {
iveresov@2138 2493 jobject2reg(k->constant_encoding(), k_RInfo);
iveresov@2138 2494 } else {
iveresov@2138 2495 jobject2reg_with_patching(k_RInfo, op->info_for_patch());
iveresov@2138 2496 }
iveresov@2138 2497 assert(obj != k_RInfo, "must be different");
iveresov@2138 2498
iveresov@2138 2499 // get object class
iveresov@2138 2500 // not a safepoint as obj null check happens earlier
iveresov@2344 2501 __ load_klass(obj, klass_RInfo);
iveresov@2138 2502 if (op->fast_check()) {
iveresov@2138 2503 assert_different_registers(klass_RInfo, k_RInfo);
iveresov@2138 2504 __ cmp(k_RInfo, klass_RInfo);
iveresov@2138 2505 __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target);
iveresov@2138 2506 __ delayed()->nop();
iveresov@2138 2507 } else {
iveresov@2138 2508 bool need_slow_path = true;
iveresov@2138 2509 if (k->is_loaded()) {
iveresov@2138 2510 if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
iveresov@2138 2511 need_slow_path = false;
iveresov@2138 2512 // perform the fast part of the checking logic
iveresov@2138 2513 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
iveresov@2146 2514 (need_slow_path ? success_target : NULL),
iveresov@2138 2515 failure_target, NULL,
iveresov@2138 2516 RegisterOrConstant(k->super_check_offset()));
iveresov@2138 2517 } else {
iveresov@2138 2518 // perform the fast part of the checking logic
iveresov@2146 2519 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target,
iveresov@2138 2520 failure_target, NULL);
iveresov@2138 2521 }
iveresov@2138 2522 if (need_slow_path) {
iveresov@2138 2523 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
iveresov@2138 2524 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
iveresov@2138 2525 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
iveresov@2138 2526 __ delayed()->nop();
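      // the stub leaves its result in G3: zero indicates a failed subtype
      // check (taken branch below); otherwise we fall through as success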
iveresov@2138 2527 __ cmp(G3, 0);
iveresov@2138 2528 __ br(Assembler::equal, false, Assembler::pn, *failure_target);
iveresov@2138 2529 __ delayed()->nop();
iveresov@2146 2530 // Fall through to success case
iveresov@2138 2531 }
iveresov@2138 2532 }
iveresov@2138 2533
iveresov@2138 2534 if (op->should_profile()) {
iveresov@2138 2535 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
iveresov@2138 2536 assert_different_registers(obj, mdo, recv, tmp1);
iveresov@2146 2537 __ bind(profile_cast_success);
iveresov@2138 2538 jobject2reg(md->constant_encoding(), mdo);
iveresov@2138 2539 if (mdo_offset_bias > 0) {
iveresov@2138 2540 __ set(mdo_offset_bias, tmp1);
iveresov@2138 2541 __ add(mdo, tmp1, mdo);
iveresov@2138 2542 }
iveresov@2344 2543 __ load_klass(obj, recv);
iveresov@2146 2544 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
iveresov@2138 2545 // Jump over the failure case
iveresov@2146 2546 __ ba(false, *success);
iveresov@2138 2547 __ delayed()->nop();
iveresov@2138 2548 // Cast failure case
iveresov@2138 2549 __ bind(profile_cast_failure);
iveresov@2138 2550 jobject2reg(md->constant_encoding(), mdo);
iveresov@2138 2551 if (mdo_offset_bias > 0) {
iveresov@2138 2552 __ set(mdo_offset_bias, tmp1);
iveresov@2138 2553 __ add(mdo, tmp1, mdo);
iveresov@2138 2554 }
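    // The failure path decrements the count that the success path increments
    // (via type_profile_helper); reading the code, this lets later compiles
    // notice casts that sometimes fail, though that contract is not spelled
    // out here.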
iveresov@2138 2555 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
iveresov@2138 2556 __ ld_ptr(data_addr, tmp1);
iveresov@2138 2557 __ sub(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2138 2558 __ st_ptr(tmp1, data_addr);
iveresov@2146 2559 __ ba(false, *failure);
iveresov@2138 2560 __ delayed()->nop();
iveresov@2138 2561 }
iveresov@2146 2562 __ ba(false, *success);
iveresov@2146 2563 __ delayed()->nop();
iveresov@2138 2564 }
iveresov@2138 2565
duke@435 2566 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
duke@435 2567 LIR_Code code = op->code();
duke@435 2568 if (code == lir_store_check) {
duke@435 2569 Register value = op->object()->as_register();
duke@435 2570 Register array = op->array()->as_register();
duke@435 2571 Register k_RInfo = op->tmp1()->as_register();
duke@435 2572 Register klass_RInfo = op->tmp2()->as_register();
duke@435 2573 Register Rtmp1 = op->tmp3()->as_register();
duke@435 2574
duke@435 2575 __ verify_oop(value);
duke@435 2576 CodeStub* stub = op->stub();
iveresov@2146 2577 // check if it needs to be profiled
iveresov@2146 2578 ciMethodData* md;
iveresov@2146 2579 ciProfileData* data;
iveresov@2146 2580 int mdo_offset_bias = 0;
iveresov@2146 2581 if (op->should_profile()) {
iveresov@2146 2582 ciMethod* method = op->profiled_method();
iveresov@2146 2583 assert(method != NULL, "Should have method");
iveresov@2146 2584 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
iveresov@2146 2585 }
iveresov@2146 2586 Label profile_cast_success, profile_cast_failure, done;
iveresov@2146 2587 Label *success_target = op->should_profile() ? &profile_cast_success : &done;
iveresov@2146 2588 Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
iveresov@2146 2589
iveresov@2146 2590 if (op->should_profile()) {
iveresov@2146 2591 Label not_null;
iveresov@2146 2592 __ br_notnull(value, false, Assembler::pn, not_null);
iveresov@2146 2593 __ delayed()->nop();
iveresov@2146 2594 Register mdo = k_RInfo;
iveresov@2146 2595 Register data_val = Rtmp1;
iveresov@2146 2596 jobject2reg(md->constant_encoding(), mdo);
iveresov@2146 2597 if (mdo_offset_bias > 0) {
iveresov@2146 2598 __ set(mdo_offset_bias, data_val);
iveresov@2146 2599 __ add(mdo, data_val, mdo);
iveresov@2146 2600 }
iveresov@2146 2601 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
iveresov@2146 2602 __ ldub(flags_addr, data_val);
iveresov@2146 2603 __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
iveresov@2146 2604 __ stb(data_val, flags_addr);
iveresov@2146 2605 __ ba(false, done);
iveresov@2146 2606 __ delayed()->nop();
iveresov@2146 2607 __ bind(not_null);
iveresov@2146 2608 } else {
iveresov@2146 2609 __ br_null(value, false, Assembler::pn, done);
iveresov@2146 2610 __ delayed()->nop();
iveresov@2146 2611 }
iveresov@2344 2612 add_debug_info_for_null_check_here(op->info_for_exception());
iveresov@2344 2613 __ load_klass(array, k_RInfo);
iveresov@2344 2614 __ load_klass(value, klass_RInfo);
duke@435 2615
duke@435 2616 // get instance klass
iveresov@2344 2617 __ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)), k_RInfo);
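    // k_RInfo now holds the array's element klass, i.e. the required
    // supertype of the value being stored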
jrose@1079 2618 // perform the fast part of the checking logic
iveresov@2146 2619 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);
jrose@1079 2620
jrose@1079 2621 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
jrose@1079 2622 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
duke@435 2623 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
duke@435 2624 __ delayed()->nop();
duke@435 2625 __ cmp(G3, 0);
iveresov@2146 2626 __ br(Assembler::equal, false, Assembler::pn, *failure_target);
duke@435 2627 __ delayed()->nop();
iveresov@2146 2628 // fall through to the success case
iveresov@2146 2629
iveresov@2146 2630 if (op->should_profile()) {
iveresov@2146 2631 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
iveresov@2146 2632 assert_different_registers(value, mdo, recv, tmp1);
iveresov@2146 2633 __ bind(profile_cast_success);
iveresov@2146 2634 jobject2reg(md->constant_encoding(), mdo);
iveresov@2146 2635 if (mdo_offset_bias > 0) {
iveresov@2146 2636 __ set(mdo_offset_bias, tmp1);
iveresov@2146 2637 __ add(mdo, tmp1, mdo);
iveresov@2146 2638 }
iveresov@2344 2639 __ load_klass(value, recv);
iveresov@2146 2640 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
iveresov@2146 2641 __ ba(false, done);
iveresov@2146 2642 __ delayed()->nop();
iveresov@2146 2643 // Cast failure case
iveresov@2146 2644 __ bind(profile_cast_failure);
iveresov@2146 2645 jobject2reg(md->constant_encoding(), mdo);
iveresov@2146 2646 if (mdo_offset_bias > 0) {
iveresov@2146 2647 __ set(mdo_offset_bias, tmp1);
iveresov@2146 2648 __ add(mdo, tmp1, mdo);
iveresov@2146 2649 }
iveresov@2146 2650 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
iveresov@2146 2651 __ ld_ptr(data_addr, tmp1);
iveresov@2146 2652 __ sub(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2146 2653 __ st_ptr(tmp1, data_addr);
iveresov@2146 2654 __ ba(false, *stub->entry());
iveresov@2146 2655 __ delayed()->nop();
iveresov@2146 2656 }
duke@435 2657 __ bind(done);
iveresov@2146 2658 } else if (code == lir_checkcast) {
iveresov@2146 2659 Register obj = op->object()->as_register();
iveresov@2146 2660 Register dst = op->result_opr()->as_register();
iveresov@2146 2661 Label success;
iveresov@2146 2662 emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
iveresov@2146 2663 __ bind(success);
iveresov@2146 2664 __ mov(obj, dst);
duke@435 2665 } else if (code == lir_instanceof) {
duke@435 2666 Register obj = op->object()->as_register();
duke@435 2667 Register dst = op->result_opr()->as_register();
iveresov@2146 2668 Label success, failure, done;
iveresov@2146 2669 emit_typecheck_helper(op, &success, &failure, &failure);
iveresov@2146 2670 __ bind(failure);
iveresov@2146 2671 __ set(0, dst);
iveresov@2146 2672 __ ba(false, done);
iveresov@2146 2673 __ delayed()->nop();
iveresov@2146 2674 __ bind(success);
iveresov@2146 2675 __ set(1, dst);
iveresov@2146 2676 __ bind(done);
duke@435 2677 } else {
duke@435 2678 ShouldNotReachHere();
duke@435 2679 }
duke@435 2680
duke@435 2681 }
duke@435 2682
duke@435 2683
duke@435 2684 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
duke@435 2685 if (op->code() == lir_cas_long) {
duke@435 2686 assert(VM_Version::supports_cx8(), "wrong machine");
duke@435 2687 Register addr = op->addr()->as_pointer_register();
duke@435 2688 Register cmp_value_lo = op->cmp_value()->as_register_lo();
duke@435 2689 Register cmp_value_hi = op->cmp_value()->as_register_hi();
duke@435 2690 Register new_value_lo = op->new_value()->as_register_lo();
duke@435 2691 Register new_value_hi = op->new_value()->as_register_hi();
duke@435 2692 Register t1 = op->tmp1()->as_register();
duke@435 2693 Register t2 = op->tmp2()->as_register();
duke@435 2694 #ifdef _LP64
duke@435 2695 __ mov(cmp_value_lo, t1);
duke@435 2696 __ mov(new_value_lo, t2);
iveresov@2412 2697 // perform the compare and swap operation
iveresov@2412 2698 __ casx(addr, t1, t2);
iveresov@2412 2699 // generate condition code - if the swap succeeded, t2 ("new value" reg) was
iveresov@2412 2700 // overwritten with the original value in "addr" and will be equal to t1.
iveresov@2412 2701 __ cmp(t1, t2);
duke@435 2702 #else
duke@435 2703 // move high and low halves of long values into single registers
duke@435 2704 __ sllx(cmp_value_hi, 32, t1); // shift high half into temp reg
duke@435 2705 __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half
duke@435 2706 __ or3(t1, cmp_value_lo, t1); // t1 holds 64-bit compare value
duke@435 2707 __ sllx(new_value_hi, 32, t2);
duke@435 2708 __ srl(new_value_lo, 0, new_value_lo);
duke@435 2709 __ or3(t2, new_value_lo, t2); // t2 holds 64-bit value to swap
duke@435 2710 // perform the compare and swap operation
duke@435 2711 __ casx(addr, t1, t2);
duke@435 2712 // generate condition code - if the swap succeeded, t2 ("new value" reg) was
duke@435 2713 // overwritten with the original value in "addr" and will be equal to t1.
iveresov@2412 2714     // Produce the icc condition flags for the 32-bit case.
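    // sub produces the full 64-bit difference; or-ing its high and low
    // 32-bit halves into G0 sets the icc Z flag only if all 64 bits matched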
iveresov@2412 2715 __ sub(t1, t2, t2);
iveresov@2412 2716 __ srlx(t2, 32, t1);
iveresov@2412 2717 __ orcc(t2, t1, G0);
iveresov@2412 2718 #endif
duke@435 2719 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
duke@435 2720 Register addr = op->addr()->as_pointer_register();
duke@435 2721 Register cmp_value = op->cmp_value()->as_register();
duke@435 2722 Register new_value = op->new_value()->as_register();
duke@435 2723 Register t1 = op->tmp1()->as_register();
duke@435 2724 Register t2 = op->tmp2()->as_register();
duke@435 2725 __ mov(cmp_value, t1);
duke@435 2726 __ mov(new_value, t2);
duke@435 2727 if (op->code() == lir_cas_obj) {
iveresov@2344 2728 if (UseCompressedOops) {
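        // the field holds a 32-bit narrow oop, so encode both the expected
        // and the new value and use the 32-bit cas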
iveresov@2344 2729 __ encode_heap_oop(t1);
iveresov@2344 2730 __ encode_heap_oop(t2);
duke@435 2731 __ cas(addr, t1, t2);
iveresov@2344 2732 } else {
never@2352 2733 __ cas_ptr(addr, t1, t2);
duke@435 2734 }
iveresov@2344 2735 } else {
iveresov@2344 2736 __ cas(addr, t1, t2);
iveresov@2344 2737 }
duke@435 2738 __ cmp(t1, t2);
duke@435 2739 } else {
duke@435 2740 Unimplemented();
duke@435 2741 }
duke@435 2742 }
duke@435 2743
duke@435 2744 void LIR_Assembler::set_24bit_FPU() {
duke@435 2745 Unimplemented();
duke@435 2746 }
duke@435 2747
duke@435 2748
duke@435 2749 void LIR_Assembler::reset_FPU() {
duke@435 2750 Unimplemented();
duke@435 2751 }
duke@435 2752
duke@435 2753
duke@435 2754 void LIR_Assembler::breakpoint() {
duke@435 2755 __ breakpoint_trap();
duke@435 2756 }
duke@435 2757
duke@435 2758
duke@435 2759 void LIR_Assembler::push(LIR_Opr opr) {
duke@435 2760 Unimplemented();
duke@435 2761 }
duke@435 2762
duke@435 2763
duke@435 2764 void LIR_Assembler::pop(LIR_Opr opr) {
duke@435 2765 Unimplemented();
duke@435 2766 }
duke@435 2767
duke@435 2768
duke@435 2769 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
duke@435 2770 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
duke@435 2771 Register dst = dst_opr->as_register();
duke@435 2772 Register reg = mon_addr.base();
duke@435 2773 int offset = mon_addr.disp();
duke@435 2774 // compute pointer to BasicLock
duke@435 2775 if (mon_addr.is_simm13()) {
duke@435 2776 __ add(reg, offset, dst);
duke@435 2777 } else {
duke@435 2778 __ set(offset, dst);
duke@435 2779 __ add(dst, reg, dst);
duke@435 2780 }
duke@435 2781 }
duke@435 2782
duke@435 2783
duke@435 2784 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
duke@435 2785 Register obj = op->obj_opr()->as_register();
duke@435 2786 Register hdr = op->hdr_opr()->as_register();
duke@435 2787 Register lock = op->lock_opr()->as_register();
duke@435 2788
duke@435 2789 // obj may not be an oop
duke@435 2790 if (op->code() == lir_lock) {
duke@435 2791 MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
duke@435 2792 if (UseFastLocking) {
duke@435 2793 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
duke@435 2794 // add debug info for NullPointerException only if one is possible
duke@435 2795 if (op->info() != NULL) {
duke@435 2796 add_debug_info_for_null_check_here(op->info());
duke@435 2797 }
duke@435 2798 __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
duke@435 2799 } else {
duke@435 2800 // always do slow locking
duke@435 2801 // note: the slow locking code could be inlined here, however if we use
duke@435 2802 // slow locking, speed doesn't matter anyway and this solution is
duke@435 2803 // simpler and requires less duplicated code - additionally, the
duke@435 2804 // slow locking code is the same in either case which simplifies
duke@435 2805 // debugging
duke@435 2806 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
duke@435 2807 __ delayed()->nop();
duke@435 2808 }
duke@435 2809 } else {
duke@435 2810 assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
duke@435 2811 if (UseFastLocking) {
duke@435 2812 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
duke@435 2813 __ unlock_object(hdr, obj, lock, *op->stub()->entry());
duke@435 2814 } else {
duke@435 2815 // always do slow unlocking
duke@435 2816 // note: the slow unlocking code could be inlined here, however if we use
duke@435 2817 // slow unlocking, speed doesn't matter anyway and this solution is
duke@435 2818 // simpler and requires less duplicated code - additionally, the
duke@435 2819 // slow unlocking code is the same in either case which simplifies
duke@435 2820 // debugging
duke@435 2821 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
duke@435 2822 __ delayed()->nop();
duke@435 2823 }
duke@435 2824 }
duke@435 2825 __ bind(*op->stub()->continuation());
duke@435 2826 }
duke@435 2827
duke@435 2828
duke@435 2829 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
duke@435 2830 ciMethod* method = op->profiled_method();
duke@435 2831 int bci = op->profiled_bci();
duke@435 2832
duke@435 2833 // Update counter for all call types
iveresov@2349 2834 ciMethodData* md = method->method_data_or_null();
iveresov@2349 2835 assert(md != NULL, "Sanity");
duke@435 2836 ciProfileData* data = md->bci_to_data(bci);
duke@435 2837 assert(data->is_CounterData(), "need CounterData for calls");
duke@435 2838 assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
iveresov@2138 2839 Register mdo = op->mdo()->as_register();
iveresov@2138 2840 #ifdef _LP64
iveresov@2138 2841 assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
iveresov@2138 2842 Register tmp1 = op->tmp1()->as_register_lo();
iveresov@2138 2843 #else
duke@435 2844 assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
duke@435 2845 Register tmp1 = op->tmp1()->as_register();
iveresov@2138 2846 #endif
jrose@1424 2847 jobject2reg(md->constant_encoding(), mdo);
duke@435 2848 int mdo_offset_bias = 0;
duke@435 2849 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
duke@435 2850 data->size_in_bytes())) {
duke@435 2851       // The offset is large, so bias the mdo by the base of the slot so
duke@435 2852       // that the loads can use simm13 displacements to reference the slots of the data
duke@435 2853 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
duke@435 2854 __ set(mdo_offset_bias, O7);
duke@435 2855 __ add(mdo, O7, mdo);
duke@435 2856 }
duke@435 2857
twisti@1162 2858 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
duke@435 2859 Bytecodes::Code bc = method->java_code_at_bci(bci);
duke@435 2860 // Perform additional virtual call profiling for invokevirtual and
duke@435 2861 // invokeinterface bytecodes
duke@435 2862 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
iveresov@2138 2863 C1ProfileVirtualCalls) {
duke@435 2864 assert(op->recv()->is_single_cpu(), "recv must be allocated");
duke@435 2865 Register recv = op->recv()->as_register();
duke@435 2866 assert_different_registers(mdo, tmp1, recv);
duke@435 2867 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
duke@435 2868 ciKlass* known_klass = op->known_holder();
iveresov@2138 2869 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
duke@435 2870 // We know the type that will be seen at this call site; we can
duke@435 2871 // statically update the methodDataOop rather than needing to do
duke@435 2872 // dynamic tests on the receiver type
duke@435 2873
duke@435 2874 // NOTE: we should probably put a lock around this search to
duke@435 2875 // avoid collisions by concurrent compilations
duke@435 2876 ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
duke@435 2877 uint i;
duke@435 2878 for (i = 0; i < VirtualCallData::row_limit(); i++) {
duke@435 2879 ciKlass* receiver = vc_data->receiver(i);
duke@435 2880 if (known_klass->equals(receiver)) {
twisti@1162 2881 Address data_addr(mdo, md->byte_offset_of_slot(data,
twisti@1162 2882 VirtualCallData::receiver_count_offset(i)) -
duke@435 2883 mdo_offset_bias);
iveresov@2138 2884 __ ld_ptr(data_addr, tmp1);
duke@435 2885 __ add(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2138 2886 __ st_ptr(tmp1, data_addr);
duke@435 2887 return;
duke@435 2888 }
duke@435 2889 }
duke@435 2890
duke@435 2891 // Receiver type not found in profile data; select an empty slot
duke@435 2892
duke@435 2893 // Note that this is less efficient than it should be because it
duke@435 2894 // always does a write to the receiver part of the
duke@435 2895 // VirtualCallData rather than just the first time
duke@435 2896 for (i = 0; i < VirtualCallData::row_limit(); i++) {
duke@435 2897 ciKlass* receiver = vc_data->receiver(i);
duke@435 2898 if (receiver == NULL) {
twisti@1162 2899 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
duke@435 2900 mdo_offset_bias);
jrose@1424 2901 jobject2reg(known_klass->constant_encoding(), tmp1);
duke@435 2902 __ st_ptr(tmp1, recv_addr);
twisti@1162 2903 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
duke@435 2904 mdo_offset_bias);
iveresov@2138 2905 __ ld_ptr(data_addr, tmp1);
duke@435 2906 __ add(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2138 2907 __ st_ptr(tmp1, data_addr);
duke@435 2908 return;
duke@435 2909 }
duke@435 2910 }
duke@435 2911 } else {
iveresov@2344 2912 __ load_klass(recv, recv);
duke@435 2913 Label update_done;
iveresov@2138 2914 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
kvn@1686 2915 // Receiver did not match any saved receiver and there is no empty row for it.
kvn@1686 2916 // Increment total counter to indicate polymorphic case.
iveresov@2138 2917 __ ld_ptr(counter_addr, tmp1);
kvn@1686 2918 __ add(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2138 2919 __ st_ptr(tmp1, counter_addr);
duke@435 2920
duke@435 2921 __ bind(update_done);
duke@435 2922 }
kvn@1686 2923 } else {
kvn@1686 2924 // Static call
iveresov@2138 2925 __ ld_ptr(counter_addr, tmp1);
kvn@1686 2926 __ add(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2138 2927 __ st_ptr(tmp1, counter_addr);
duke@435 2928 }
duke@435 2929 }
duke@435 2930
duke@435 2931 void LIR_Assembler::align_backward_branch_target() {
kvn@1800 2932 __ align(OptoLoopAlignment);
duke@435 2933 }
duke@435 2934
duke@435 2935
duke@435 2936 void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
duke@435 2937 // make sure we are expecting a delay
duke@435 2938 // this has the side effect of clearing the delay state
duke@435 2939 // so we can use _masm instead of _masm->delayed() to do the
duke@435 2940 // code generation.
duke@435 2941 __ delayed();
duke@435 2942
duke@435 2943 // make sure we only emit one instruction
duke@435 2944 int offset = code_offset();
duke@435 2945 op->delay_op()->emit_code(this);
duke@435 2946 #ifdef ASSERT
duke@435 2947 if (code_offset() - offset != NativeInstruction::nop_instruction_size) {
duke@435 2948 op->delay_op()->print();
duke@435 2949 }
duke@435 2950 assert(code_offset() - offset == NativeInstruction::nop_instruction_size,
duke@435 2951 "only one instruction can go in a delay slot");
duke@435 2952 #endif
duke@435 2953
duke@435 2954 // we may also be emitting the call info for the instruction
duke@435 2955 // which we are the delay slot of.
twisti@1919 2956 CodeEmitInfo* call_info = op->call_info();
duke@435 2957 if (call_info) {
duke@435 2958 add_call_info(code_offset(), call_info);
duke@435 2959 }
duke@435 2960
duke@435 2961 if (VerifyStackAtCalls) {
duke@435 2962 _masm->sub(FP, SP, O7);
duke@435 2963 _masm->cmp(O7, initial_frame_size_in_bytes());
duke@435 2964 _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2 );
duke@435 2965 }
duke@435 2966 }
duke@435 2967
duke@435 2968
duke@435 2969 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
duke@435 2970 assert(left->is_register(), "can only handle registers");
duke@435 2971
duke@435 2972 if (left->is_single_cpu()) {
duke@435 2973 __ neg(left->as_register(), dest->as_register());
duke@435 2974 } else if (left->is_single_fpu()) {
duke@435 2975 __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg());
duke@435 2976 } else if (left->is_double_fpu()) {
duke@435 2977 __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg());
duke@435 2978 } else {
duke@435 2979 assert (left->is_double_cpu(), "Must be a long");
duke@435 2980 Register Rlow = left->as_register_lo();
duke@435 2981 Register Rhi = left->as_register_hi();
duke@435 2982 #ifdef _LP64
duke@435 2983 __ sub(G0, Rlow, dest->as_register_lo());
duke@435 2984 #else
duke@435 2985 __ subcc(G0, Rlow, dest->as_register_lo());
duke@435 2986 __ subc (G0, Rhi, dest->as_register_hi());
duke@435 2987 #endif
duke@435 2988 }
duke@435 2989 }
duke@435 2990
duke@435 2991
duke@435 2992 void LIR_Assembler::fxch(int i) {
duke@435 2993 Unimplemented();
duke@435 2994 }
duke@435 2995
duke@435 2996 void LIR_Assembler::fld(int i) {
duke@435 2997 Unimplemented();
duke@435 2998 }
duke@435 2999
duke@435 3000 void LIR_Assembler::ffree(int i) {
duke@435 3001 Unimplemented();
duke@435 3002 }
duke@435 3003
duke@435 3004 void LIR_Assembler::rt_call(LIR_Opr result, address dest,
duke@435 3005 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
duke@435 3006
duke@435 3007 // if tmp is invalid, then the function being called doesn't destroy the thread
duke@435 3008 if (tmp->is_valid()) {
duke@435 3009 __ save_thread(tmp->as_register());
duke@435 3010 }
duke@435 3011 __ call(dest, relocInfo::runtime_call_type);
duke@435 3012 __ delayed()->nop();
duke@435 3013 if (info != NULL) {
duke@435 3014 add_call_info_here(info);
duke@435 3015 }
duke@435 3016 if (tmp->is_valid()) {
duke@435 3017 __ restore_thread(tmp->as_register());
duke@435 3018 }
duke@435 3019
duke@435 3020 #ifdef ASSERT
duke@435 3021 __ verify_thread();
duke@435 3022 #endif // ASSERT
duke@435 3023 }
duke@435 3024
duke@435 3025
duke@435 3026 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
duke@435 3027 #ifdef _LP64
duke@435 3028 ShouldNotReachHere();
duke@435 3029 #endif
duke@435 3030
duke@435 3031 NEEDS_CLEANUP;
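  // On 32-bit SPARC a volatile long must be accessed with one 64-bit memory
  // instruction (ldx/stx on v9, ldd/std otherwise) so the access stays
  // atomic; a pair of 32-bit moves could be torn by a concurrent writer.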
duke@435 3032 if (type == T_LONG) {
duke@435 3033 LIR_Address* mem_addr = dest->is_address() ? dest->as_address_ptr() : src->as_address_ptr();
duke@435 3034
duke@435 3035 // (extended to allow indexed as well as constant displaced for JSR-166)
duke@435 3036 Register idx = noreg; // contains either constant offset or index
duke@435 3037
duke@435 3038 int disp = mem_addr->disp();
duke@435 3039 if (mem_addr->index() == LIR_OprFact::illegalOpr) {
duke@435 3040 if (!Assembler::is_simm13(disp)) {
duke@435 3041 idx = O7;
duke@435 3042 __ set(disp, idx);
duke@435 3043 }
duke@435 3044 } else {
duke@435 3045 assert(disp == 0, "not both indexed and disp");
duke@435 3046 idx = mem_addr->index()->as_register();
duke@435 3047 }
duke@435 3048
duke@435 3049 int null_check_offset = -1;
duke@435 3050
duke@435 3051 Register base = mem_addr->base()->as_register();
duke@435 3052 if (src->is_register() && dest->is_address()) {
duke@435 3053 // G4 is high half, G5 is low half
duke@435 3054 if (VM_Version::v9_instructions_work()) {
duke@435 3055 // clear the top bits of G5, and scale up G4
duke@435 3056 __ srl (src->as_register_lo(), 0, G5);
duke@435 3057 __ sllx(src->as_register_hi(), 32, G4);
duke@435 3058 // combine the two halves into the 64 bits of G4
duke@435 3059 __ or3(G4, G5, G4);
duke@435 3060 null_check_offset = __ offset();
duke@435 3061 if (idx == noreg) {
duke@435 3062 __ stx(G4, base, disp);
duke@435 3063 } else {
duke@435 3064 __ stx(G4, base, idx);
duke@435 3065 }
duke@435 3066 } else {
duke@435 3067 __ mov (src->as_register_hi(), G4);
duke@435 3068 __ mov (src->as_register_lo(), G5);
duke@435 3069 null_check_offset = __ offset();
duke@435 3070 if (idx == noreg) {
duke@435 3071 __ std(G4, base, disp);
duke@435 3072 } else {
duke@435 3073 __ std(G4, base, idx);
duke@435 3074 }
duke@435 3075 }
duke@435 3076 } else if (src->is_address() && dest->is_register()) {
duke@435 3077 null_check_offset = __ offset();
duke@435 3078 if (VM_Version::v9_instructions_work()) {
duke@435 3079 if (idx == noreg) {
duke@435 3080 __ ldx(base, disp, G5);
duke@435 3081 } else {
duke@435 3082 __ ldx(base, idx, G5);
duke@435 3083 }
duke@435 3084 __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
duke@435 3085 __ mov (G5, dest->as_register_lo()); // copy low half into lo
duke@435 3086 } else {
duke@435 3087 if (idx == noreg) {
duke@435 3088 __ ldd(base, disp, G4);
duke@435 3089 } else {
duke@435 3090 __ ldd(base, idx, G4);
duke@435 3091 }
duke@435 3092 // G4 is high half, G5 is low half
duke@435 3093 __ mov (G4, dest->as_register_hi());
duke@435 3094 __ mov (G5, dest->as_register_lo());
duke@435 3095 }
duke@435 3096 } else {
duke@435 3097 Unimplemented();
duke@435 3098 }
duke@435 3099 if (info != NULL) {
duke@435 3100 add_debug_info_for_null_check(null_check_offset, info);
duke@435 3101 }
duke@435 3102
duke@435 3103 } else {
duke@435 3104 // use normal move for all other volatiles since they don't need
duke@435 3105 // special handling to remain atomic.
iveresov@2344 3106 move_op(src, dest, type, lir_patch_none, info, false, false, false);
duke@435 3107 }
duke@435 3108 }
duke@435 3109
duke@435 3110 void LIR_Assembler::membar() {
duke@435 3111   // only StoreLoad membars are ever explicitly needed on SPARC in TSO mode
duke@435 3112 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
duke@435 3113 }
duke@435 3114
duke@435 3115 void LIR_Assembler::membar_acquire() {
duke@435 3116 // no-op on TSO
duke@435 3117 }
duke@435 3118
duke@435 3119 void LIR_Assembler::membar_release() {
duke@435 3120 // no-op on TSO
duke@435 3121 }
duke@435 3122
iveresov@2138 3123 // Pack two sequential registers containing 32-bit values
duke@435 3124 // into a single 64-bit register.
iveresov@2138 3125 // src and src->successor() are packed into dst
iveresov@2138 3126 // src and dst may be the same register.
iveresov@2138 3127 // Note: src is destroyed
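// e.g. (illustrative values) src = 0x11223344 with src->successor() =
// 0x55667788 packs into dst = 0x1122334455667788.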
iveresov@2138 3128 void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) {
iveresov@2138 3129 Register rs = src->as_register();
iveresov@2138 3130 Register rd = dst->as_register_lo();
duke@435 3131 __ sllx(rs, 32, rs);
duke@435 3132 __ srl(rs->successor(), 0, rs->successor());
duke@435 3133 __ or3(rs, rs->successor(), rd);
duke@435 3134 }
duke@435 3135
iveresov@2138 3136 // Unpack a 64-bit value in a register into
duke@435 3137 // two sequential registers.
iveresov@2138 3138 // src is unpacked into dst and dst->successor()
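// e.g. (illustrative values) src = 0x1122334455667788 unpacks into
// dst = 0x11223344 and dst->successor() = 0x55667788.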
iveresov@2138 3139 void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) {
iveresov@2138 3140 Register rs = src->as_register_lo();
iveresov@2138 3141 Register rd = dst->as_register_hi();
iveresov@2138 3142 assert_different_registers(rs, rd, rd->successor());
iveresov@2138 3143 __ srlx(rs, 32, rd);
iveresov@2138 3144 __ srl (rs, 0, rd->successor());
duke@435 3145 }
duke@435 3146
duke@435 3147
duke@435 3148 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
duke@435 3149 LIR_Address* addr = addr_opr->as_address_ptr();
duke@435 3150 assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet");
iveresov@2138 3151
iveresov@2138 3152 __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register());
duke@435 3153 }
duke@435 3154
duke@435 3155
duke@435 3156 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
duke@435 3157 assert(result_reg->is_register(), "check");
duke@435 3158 __ mov(G2_thread, result_reg->as_register());
duke@435 3159 }
duke@435 3160
duke@435 3161
duke@435 3162 void LIR_Assembler::peephole(LIR_List* lir) {
duke@435 3163 LIR_OpList* inst = lir->instructions_list();
duke@435 3164 for (int i = 0; i < inst->length(); i++) {
duke@435 3165 LIR_Op* op = inst->at(i);
duke@435 3166 switch (op->code()) {
duke@435 3167 case lir_cond_float_branch:
duke@435 3168 case lir_branch: {
duke@435 3169 LIR_OpBranch* branch = op->as_OpBranch();
duke@435 3170 assert(branch->info() == NULL, "shouldn't be state on branches anymore");
duke@435 3171 LIR_Op* delay_op = NULL;
duke@435 3172         // we'd like to be able to pull following instructions into
duke@435 3173         // this slot, but we don't know enough to do it safely yet, so
duke@435 3174         // we only optimize block-to-block control flow.
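        // e.g. a single-instruction move immediately preceding a block-local
        // branch can be swapped into the branch's delay slot, replacing the
        // filler nop that would otherwise be emitted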
duke@435 3175 if (LIRFillDelaySlots && branch->block()) {
duke@435 3176 LIR_Op* prev = inst->at(i - 1);
duke@435 3177 if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) {
duke@435 3178 // swap previous instruction into delay slot
duke@435 3179 inst->at_put(i - 1, op);
duke@435 3180 inst->at_put(i, new LIR_OpDelay(prev, op->info()));
duke@435 3181 #ifndef PRODUCT
duke@435 3182 if (LIRTracePeephole) {
duke@435 3183 tty->print_cr("delayed");
duke@435 3184 inst->at(i - 1)->print();
duke@435 3185 inst->at(i)->print();
twisti@1919 3186 tty->cr();
duke@435 3187 }
duke@435 3188 #endif
duke@435 3189 continue;
duke@435 3190 }
duke@435 3191 }
duke@435 3192
duke@435 3193 if (!delay_op) {
duke@435 3194 delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL);
duke@435 3195 }
duke@435 3196 inst->insert_before(i + 1, delay_op);
duke@435 3197 break;
duke@435 3198 }
duke@435 3199 case lir_static_call:
duke@435 3200 case lir_virtual_call:
duke@435 3201 case lir_icvirtual_call:
twisti@1919 3202 case lir_optvirtual_call:
twisti@1919 3203 case lir_dynamic_call: {
duke@435 3204 LIR_Op* prev = inst->at(i - 1);
duke@435 3205 if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
duke@435 3206 (op->code() != lir_virtual_call ||
duke@435 3207 !prev->result_opr()->is_single_cpu() ||
duke@435 3208 prev->result_opr()->as_register() != O0) &&
duke@435 3209 LIR_Assembler::is_single_instruction(prev)) {
duke@435 3210 // Only moves without info can be put into the delay slot.
duke@435 3211 // Also don't allow the setup of the receiver in the delay
duke@435 3212 // slot for vtable calls.
duke@435 3213 inst->at_put(i - 1, op);
duke@435 3214 inst->at_put(i, new LIR_OpDelay(prev, op->info()));
duke@435 3215 #ifndef PRODUCT
duke@435 3216 if (LIRTracePeephole) {
duke@435 3217 tty->print_cr("delayed");
duke@435 3218 inst->at(i - 1)->print();
duke@435 3219 inst->at(i)->print();
twisti@1919 3220 tty->cr();
duke@435 3221 }
duke@435 3222 #endif
iveresov@2138 3223 } else {
iveresov@2138 3224 LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
iveresov@2138 3225 inst->insert_before(i + 1, delay_op);
iveresov@2138 3226 i++;
duke@435 3227 }
duke@435 3228
iveresov@2138 3229 #if defined(TIERED) && !defined(_LP64)
iveresov@2138 3230 // fixup the return value from G1 to O0/O1 for long returns.
iveresov@2138 3231 // It's done here instead of in LIRGenerator because there's
iveresov@2138 3232 // such a mismatch between the single reg and double reg
iveresov@2138 3233 // calling convention.
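      // The call is retargeted to return the long in G1 as one 64-bit value
      // (g1_long_single_opr), and a lir_unpack64 is appended afterwards to
      // split it back into the O0/O1 pair denoted by out_long_opr.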
iveresov@2138 3234 LIR_OpJavaCall* callop = op->as_OpJavaCall();
iveresov@2138 3235 if (callop->result_opr() == FrameMap::out_long_opr) {
iveresov@2138 3236 LIR_OpJavaCall* call;
iveresov@2138 3237 LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
iveresov@2138 3238         for (int a = 0; a < callop->arguments()->length(); a++) {
iveresov@2138 3239           arguments->append(callop->arguments()->at(a));
iveresov@2138 3240         }
iveresov@2138 3241 if (op->code() == lir_virtual_call) {
iveresov@2138 3242 call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
iveresov@2138 3243 callop->vtable_offset(), arguments, callop->info());
iveresov@2138 3244 } else {
iveresov@2138 3245 call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
iveresov@2138 3246 callop->addr(), arguments, callop->info());
iveresov@2138 3247 }
iveresov@2138 3248 inst->at_put(i - 1, call);
iveresov@2138 3249 inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
iveresov@2138 3250 T_LONG, lir_patch_none, NULL));
iveresov@2138 3251 }
iveresov@2138 3252 #endif
duke@435 3253 break;
duke@435 3254 }
duke@435 3255 }
duke@435 3256 }
duke@435 3257 }
duke@435 3258
duke@435 3259
duke@435 3260
duke@435 3261
duke@435 3262 #undef __
