src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp

/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/barrierSet.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"

#define __ _masm->


//------------------------------------------------------------


bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        jint value = constant->as_jint();
        return Assembler::is_simm13(value);
      }

      default:
        return false;
    }
  }
  return false;
}
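// (For reference: is_simm13 tests the 13-bit signed immediate range of SPARC
// arithmetic instructions, i.e. [-4096, 4095]. A constant such as 4095 fits
// directly into a single "add reg, imm, reg"; 4096 would first have to be
// materialized with a sethi/or pair.)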


bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
  switch (op->code()) {
    case lir_null_check:
      return true;


    case lir_add:
    case lir_ushr:
    case lir_shr:
    case lir_shl:
      // integer shifts and adds are always one instruction
      return op->result_opr()->is_single_cpu();


    case lir_move: {
      LIR_Op1* op1 = op->as_Op1();
      LIR_Opr src = op1->in_opr();
      LIR_Opr dst = op1->result_opr();

      if (src == dst) {
        NEEDS_CLEANUP;
        // This works around a problem where a move with the same src and dst
        // ends up in the delay slot: the assembler swallows the mov since it
        // has no effect, and then complains because the delay slot is empty.
        // Returning false stops the optimizer from putting this move in the
        // delay slot.
        return false;
      }

      // don't put moves involving oops into the delay slot since the VerifyOops code
      // will make it much larger than a single instruction.
      if (VerifyOops) {
        return false;
      }

      if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
          ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
        return false;
      }

      if (UseCompressedOops) {
        if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false;
        if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
      }

      if (UseCompressedClassPointers) {
        if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS &&
            src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false;
      }

      if (dst->is_register()) {
        if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (src->is_single_stack()) {
          return true;
        }
      }

      if (src->is_register()) {
        if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (dst->is_single_stack()) {
          return true;
        }
      }

      if (dst->is_register() &&
          ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) ||
           (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) {
        return true;
      }

      return false;
    }

    default:
      return false;
  }
  ShouldNotReachHere();
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::O0_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::I0_opr;
}


int LIR_Assembler::initial_frame_size_in_bytes() const {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// inline cache check: the inline cached class is in G5_inline_cache_reg (G5);
// we fetch the class of the receiver (O0) and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(O0, G5_inline_cache_reg);
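  // inline_cache_check expands to roughly (a sketch, not the literal code;
  // see C1_MacroAssembler::inline_cache_check for the exact sequence):
  //   load_klass(O0, temp);
  //   cmp(temp, G5_inline_cache_reg);
  //   branch to the IC-miss stub if they differ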
  return offset;
}


void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
  //
  // 1. Create a new compiled activation.
  // 2. Initialize local variables in the compiled activation. The expression stack must be empty
  //    at the osr_bci; it is not initialized.
  // 3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[number_of_locks-1..0]
  //
  // locals is a direct copy of the interpreter frame, so the first slot in
  // the local array is the last local from the interpreter and the last
  // slot is local[0] (the receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame; the nth lock slot in the osr buffer is the 0th
  // lock in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  //   I0: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
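    // For example (illustrative numbers only; 64-bit, so BytesPerWord == 8):
    // with max_locals == 4 and number_of_locks == 2, monitor_offset is
    // 8*4 + 16*1 == 48. Lock 0 is then read from buffer offsets 48
    // (BasicLock) and 56 (oop), lock 1 from offsets 32 and 40.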
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
        __ cmp_and_br_short(O7, G0, Assembler::notEqual, Assembler::pt, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      __ ld_ptr(OSR_buf, slot_offset + 0, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
      __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
    }
  }
}


// Optimized Library calls
// This is the fast version of java.lang.String.compare; it has no OSR entry,
// so we generate a slow version for OSRs.
void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) {
  Register str0 = left->as_register();
  Register str1 = right->as_register();

  Label Ldone;

  Register result = dst->as_register();
  {
    // Get a pointer to the first character of string0 in tmp0
    //   and get string0.length() in str0
    // Get a pointer to the first character of string1 in tmp1
    //   and get string1.length() in str1
    // Also, get string0.length()-string1.length() in
    //   o7 and get the condition code set
    // Note: some instructions have been hoisted for better instruction scheduling

    Register tmp0 = L0;
    Register tmp1 = L1;
    Register tmp2 = L2;

    int value_offset = java_lang_String::value_offset_in_bytes(); // char array
    if (java_lang_String::has_offset_field()) {
      int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
      int count_offset = java_lang_String::count_offset_in_bytes();
      __ load_heap_oop(str0, value_offset, tmp0);
      __ ld(str0, offset_offset, tmp2);
      __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
      __ ld(str0, count_offset, str0);
      __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
    } else {
      __ load_heap_oop(str0, value_offset, tmp1);
      __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
      __ ld(tmp1, arrayOopDesc::length_offset_in_bytes(), str0);
    }

    // str1 may be null
    add_debug_info_for_null_check_here(info);

    if (java_lang_String::has_offset_field()) {
      int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
      int count_offset = java_lang_String::count_offset_in_bytes();
      __ load_heap_oop(str1, value_offset, tmp1);
      __ add(tmp0, tmp2, tmp0);

      __ ld(str1, offset_offset, tmp2);
      __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
      __ ld(str1, count_offset, str1);
      __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
      __ add(tmp1, tmp2, tmp1);
    } else {
      __ load_heap_oop(str1, value_offset, tmp2);
      __ add(tmp2, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
      __ ld(tmp2, arrayOopDesc::length_offset_in_bytes(), str1);
    }
    __ subcc(str0, str1, O7);
  }

  {
    // Compute the minimum of the string lengths, scale it and store it in limit
    Register count0 = I0;
    Register count1 = I1;
    Register limit  = L3;

    Label Lskip;
    __ sll(count0, exact_log2(sizeof(jchar)), limit); // string0 is shorter
    __ br(Assembler::greater, true, Assembler::pt, Lskip);
    __ delayed()->sll(count1, exact_log2(sizeof(jchar)), limit); // string1 is shorter
    __ bind(Lskip);

    // If either string is empty (or both of them) the result is the difference in lengths
    __ cmp(limit, 0);
    __ br(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->mov(O7, result); // result is difference in lengths
  }

  {
    // Neither string is empty
    Label Lloop;

    Register base0 = L0;
    Register base1 = L1;
    Register chr0  = I0;
    Register chr1  = I1;
    Register limit = L3;

    // Shift base0 and base1 to the end of the arrays, negate limit
    __ add(base0, limit, base0);
    __ add(base1, limit, base1);
    __ neg(limit); // limit = -min{string0.length(), string1.length()}
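    // Illustration: if the shorter string has 3 chars, limit starts at -6,
    // so lduh(baseN, limit, chrN) reads the chars at offsets -6, -4 and -2
    // from the end of the arrays while the loop below counts limit up by
    // sizeof(jchar) until it reaches zero.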

    __ lduh(base0, limit, chr0);
    __ bind(Lloop);
    __ lduh(base1, limit, chr1);
    __ subcc(chr0, chr1, chr0);
    __ br(Assembler::notZero, false, Assembler::pn, Ldone);
    assert(chr0 == result, "result must be pre-placed");
    __ delayed()->inccc(limit, sizeof(jchar));
    __ br(Assembler::notZero, true, Assembler::pt, Lloop);
    __ delayed()->lduh(base0, limit, chr0);
  }

  // If strings are equal up to min length, return the length difference.
  __ mov(O7, result);

  // Otherwise, return the difference between the first mismatched chars.
  __ bind(Ldone);
}


// --------------------------------------------------------------------------------------------

void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {
  if (!GenerateSynchronizationCode) return;

  Register obj_reg = obj_opr->as_register();
  Register lock_reg = lock_opr->as_register();

  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // compute pointer to BasicLock
  if (mon_addr.is_simm13()) {
    __ add(reg, offset, lock_reg);
  } else {
    __ set(offset, lock_reg);
    __ add(reg, lock_reg, lock_reg);
  }
  // unlock object
  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no);
  // _slow_case_stubs->append(slow_case);
  // temporary fix: must be created after the exception handler, therefore as a call stub
  _slow_case_stubs->append(slow_case);
  if (UseFastLocking) {
    // try inlined fast unlocking first, revert to slow locking if it fails
    // note: lock_reg points to the displaced header since the displaced header offset is 0!
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
  } else {
    // always do slow unlocking
    // note: the slow unlocking code could be inlined here, however if we use
    //       slow unlocking, speed doesn't matter anyway and this solution is
    //       simpler and requires less duplicated code - additionally, the
    //       slow unlocking code is the same in either case which simplifies
    //       debugging
    __ br(Assembler::always, false, Assembler::pt, *slow_case->entry());
    __ delayed()->nop();
  }
  // done
  __ bind(*slow_case->continuation());
}


int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  ciMethod* method = compilation()->method();

  address handler_base = __ start_a_stub(exception_handler_size);

  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(O0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(O0, I0); // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::I1_opr);
    stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
    __ unlock_object(I3, I2, I1, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(G2_thread, O0);
    __ save_thread(I1); // need to preserve thread in G2 across
                        // runtime call
    metadata2reg(method()->constant_encoding(), O1);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
    __ delayed()->nop();
    __ restore_thread(I1);
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(I0, O0); // Restore the exception
  }

  // dispatch to the unwind logic
  __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  ciMethod* method = compilation()->method();
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
  __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
  __ delayed()->nop();
  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ set(NULL_WORD, reg);
  } else {
#ifdef ASSERT
    {
      ThreadInVMfromNative tiv(JavaThread::current());
      assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(o)), "should be real oop");
    }
#endif
    int oop_index = __ oop_recorder()->find_index(o);
    RelocationHolder rspec = oop_Relocation::spec(oop_index);
    __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
  }
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the object once it's been patched
  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
  assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
  // NULL will be dynamically patched later and the patched value may be large. We must
  // therefore generate the sethi/add as placeholders.
  __ patchable_set(addrlit, reg);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
  __ set_metadata_constant(o, reg);
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the klass once it's been patched
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
  AddressLiteral addrlit(NULL, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
  // NULL will be dynamically patched later and the patched value may be large. We must
  // therefore generate the sethi/add as placeholders.
  __ patchable_set(addrlit, reg);

  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::emit_op3(LIR_Op3* op) {
  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor  = noreg;
  Register Rscratch  = op->in_opr3()->as_register();
  Register Rresult   = op->result_opr()->as_register();
  int divisor = -1;

  if (op->in_opr2()->is_register()) {
    Rdivisor = op->in_opr2()->as_register();
  } else {
    divisor = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor != Rscratch, "");
  assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");

  if (Rdivisor == noreg && is_power_of_2(divisor)) {
    // convert division by a power of two into some shifts and logical operations
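    // Worked example (illustrative): idiv with divisor == 4 and
    // Rdividend == -7:
    //   sra  -7, 31  -> -1   (all ones: the dividend is negative)
    //   and3 -1, 3   ->  3   (rounding correction, divisor - 1)
    //   add  -7, 3   -> -4
    //   sra  -4, 2   -> -1   == -7 / 4 truncated toward zero
    // The irem path computes the same corrected value, then andn clears the
    // low bits (-4 & ~3 == -4) and sub yields -7 - (-4) == -3 == -7 % 4.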
    if (op->code() == lir_idiv) {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ sra(Rscratch, log2_int(divisor), Rresult);
      return;
    } else {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ andn(Rscratch, divisor - 1, Rscratch);
      __ sub(Rdividend, Rscratch, Rresult);
      return;
    }
  }

  __ sra(Rdividend, 31, Rscratch);
  __ wry(Rscratch);

  add_debug_info_for_div0_here(op->info());

  if (Rdivisor != noreg) {
    __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  } else {
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
    __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  }

  Label skip;
  __ br(Assembler::overflowSet, true, Assembler::pn, skip);
  __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
  __ bind(skip);

  if (op->code() == lir_irem) {
    if (Rdivisor != noreg) {
      __ smul(Rscratch, Rdivisor, Rscratch);
    } else {
      __ smul(Rscratch, divisor, Rscratch);
    }
    __ sub(Rdividend, Rscratch, Rresult);
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");

  if (op->cond() == lir_cond_always) {
    __ br(Assembler::always, false, Assembler::pt, *(op->label()));
  } else if (op->code() == lir_cond_float_branch) {
    assert(op->ublock() != NULL, "must have unordered successor");
    bool is_unordered = (op->ublock() == op->block());
    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:         acond = Assembler::f_equal;    break;
      case lir_cond_notEqual:      acond = Assembler::f_notEqual; break;
      case lir_cond_less:          acond = (is_unordered ? Assembler::f_unorderedOrLess          : Assembler::f_less);           break;
      case lir_cond_greater:       acond = (is_unordered ? Assembler::f_unorderedOrGreater       : Assembler::f_greater);        break;
      case lir_cond_lessEqual:     acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual   : Assembler::f_lessOrEqual);    break;
      case lir_cond_greaterEqual:  acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
      default :                    ShouldNotReachHere();
    }
    __ fb(acond, false, Assembler::pn, *(op->label()));
  } else {
    assert (op->code() == lir_branch, "just checking");

    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::equal;                break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
      case lir_cond_less:         acond = Assembler::less;                 break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
      case lir_cond_greater:      acond = Assembler::greater;              break;
      case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
      case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
      default:                    ShouldNotReachHere();
    };

    // sparc has different condition codes for testing 32-bit
    // vs. 64-bit values. We could always test xcc if we could
    // guarantee that 32-bit loads were always sign extended, but that
    // isn't true, and since sign extension isn't free, it would impose
    // a slight cost.
#ifdef _LP64
    if (op->type() == T_INT) {
      __ br(acond, false, Assembler::pn, *(op->label()));
    } else
#endif
      __ brx(acond, false, Assembler::pn, *(op->label()));
  }
  // The peephole pass fills the delay slot
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr dst = op->result_opr();

  switch (code) {
    case Bytecodes::_i2l: {
      Register rlo  = dst->as_register_lo();
      Register rhi  = dst->as_register_hi();
      Register rval = op->in_opr()->as_register();
#ifdef _LP64
      __ sra(rval, 0, rlo);
#else
      __ mov(rval, rlo);
      __ sra(rval, BitsPerInt-1, rhi);
#endif
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_i2f: {
      bool is_double = (code == Bytecodes::_i2d);
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      if (rsrc != rdst) {
        __ fmov(FloatRegisterImpl::S, rsrc, rdst);
      }
      __ fitof(w, rdst, rdst);
      break;
    }
    case Bytecodes::_f2i: {
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      Address addr = frame_map()->address_for_slot(dst->single_stack_ix());
      Label L;
      // result must be 0 if value is NaN; test by comparing value to itself
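      // (NaN is the only value that compares unordered with itself, so the
      // fcmp/fb pair below singles it out.)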
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
      __ fb(Assembler::f_unordered, true, Assembler::pn, L);
      __ delayed()->st(G0, addr); // annulled if contents of rsrc is not NaN
      __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
      // move the integer result from the float register to the destination stack slot
      __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
      __ bind(L);
      break;
    }
    case Bytecodes::_l2i: {
      Register rlo  = op->in_opr()->as_register_lo();
      Register rhi  = op->in_opr()->as_register_hi();
      Register rdst = dst->as_register();
#ifdef _LP64
      __ sra(rlo, 0, rdst);
#else
      __ mov(rlo, rdst);
#endif
      break;
    }
    case Bytecodes::_d2f:
    case Bytecodes::_f2d: {
      bool is_double = (code == Bytecodes::_f2d);
      assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
      LIR_Opr val = op->in_opr();
      FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
      FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      __ ftof(vw, dw, rval, rdst);
      break;
    }
    case Bytecodes::_i2s:
    case Bytecodes::_i2b: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
      __ sll(rval, shift, rdst);
      __ sra(rdst, shift, rdst);
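      // e.g. _i2b with rval == 0x1234ABCD: sll 24 -> 0xCD000000, then
      // sra 24 -> 0xFFFFFFCD, i.e. the low byte sign-extended to -51.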
      break;
    }
    case Bytecodes::_i2c: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte;
      __ sll(rval, shift, rdst);
      __ srl(rdst, shift, rdst);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code) {
  // do nothing since all instructions are word aligned on sparc
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  __ call(op->addr(), rtype);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ ic_call(op->addr(), false);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}


void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  add_debug_info_for_null_check_here(op->info());
  __ load_klass(O0, G3_scratch);
  if (Assembler::is_simm13(op->vtable_offset())) {
    __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
  } else {
    // This will generate 2 instructions
    __ set(op->vtable_offset(), G5_method);
    // ld_ptr, set_hi, set
    __ ld_ptr(G3_scratch, G5_method, G5_method);
  }
  __ ld_ptr(G5_method, Method::from_compiled_offset(), G3_scratch);
  __ callr(G3_scratch, G0);
  // the peephole pass fills the delay slot
}

int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
  int store_offset;
  // for T_LONG the (possibly split) store also touches offset + wordSize, so
  // that larger offset must fit in simm13 as well
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we setup the offset in O7
    __ set(offset, O7);
    store_offset = store(from_reg, base, O7, type, wide);
  } else {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(from_reg->as_register());
    }
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), base, offset); break;
      case T_CHAR  : __ sth(from_reg->as_register(), base, offset); break;
      case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
      case T_INT   : __ stw(from_reg->as_register(), base, offset); break;
      case T_LONG  :
#ifdef _LP64
        if (unaligned || PatchALot) {
          __ srax(from_reg->as_register_lo(), 32, O7);
          __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
          __ stw(O7, base, offset + hi_word_offset_in_bytes);
        } else {
          __ stx(from_reg->as_register_lo(), base, offset);
        }
#else
        assert(Assembler::is_simm13(offset + 4), "must be");
        __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
        __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
#endif
        break;
      case T_ADDRESS:
      case T_METADATA:
        __ st_ptr(from_reg->as_register(), base, offset);
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(from_reg->as_register(), G3_scratch);
            store_offset = code_offset();
            __ stw(G3_scratch, base, offset);
          } else {
            __ st_ptr(from_reg->as_register(), base, offset);
          }
          break;
        }

      case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
      case T_DOUBLE:
        {
          FloatRegister reg = from_reg->as_double_reg();
          // split unaligned stores
          if (unaligned || PatchALot) {
            assert(Assembler::is_simm13(offset + 4), "must be");
            __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
            __ stf(FloatRegisterImpl::S, reg,              base, offset);
          } else {
            __ stf(FloatRegisterImpl::D, reg, base, offset);
          }
          break;
        }
      default : ShouldNotReachHere();
    }
  }
  return store_offset;
}


int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(from_reg->as_register());
  }
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stb(from_reg->as_register(), base, disp); break;
    case T_CHAR  : __ sth(from_reg->as_register(), base, disp); break;
    case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stw(from_reg->as_register(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ stx(from_reg->as_register_lo(), base, disp);
#else
      assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
      __ std(from_reg->as_register_hi(), base, disp);
#endif
      break;
    case T_ADDRESS:
      __ st_ptr(from_reg->as_register(), base, disp);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ encode_heap_oop(from_reg->as_register(), G3_scratch);
          store_offset = code_offset();
          __ stw(G3_scratch, base, disp);
        } else {
          __ st_ptr(from_reg->as_register(), base, disp);
        }
        break;
      }
    case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
    default : ShouldNotReachHere();
  }
  return store_offset;
}


int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
  int load_offset;
  // as in store(): for T_LONG the split load also touches offset + wordSize,
  // so that larger offset must fit in simm13 as well
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
    assert(base != O7, "destroying register");
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we setup the offset in O7
    __ set(offset, O7);
    load_offset = load(base, O7, to_reg, type, wide);
  } else {
    load_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ ldsb(base, offset, to_reg->as_register()); break;
      case T_CHAR  : __ lduh(base, offset, to_reg->as_register()); break;
      case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break;
      case T_INT   : __ ld(base, offset, to_reg->as_register()); break;
      case T_LONG  :
        if (!unaligned) {
#ifdef _LP64
          __ ldx(base, offset, to_reg->as_register_lo());
#else
          assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
                 "must be sequential");
          __ ldd(base, offset, to_reg->as_register_hi());
#endif
        } else {
#ifdef _LP64
          assert(base != to_reg->as_register_lo(), "can't handle this");
          assert(O7 != to_reg->as_register_lo(), "can't handle this");
          __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
          __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
          __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
          __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
#else
          if (base == to_reg->as_register_lo()) {
            __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
            __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
          } else {
            __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
            __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
          }
#endif
        }
        break;
      case T_METADATA: __ ld_ptr(base, offset, to_reg->as_register()); break;
      case T_ADDRESS:
#ifdef _LP64
        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
          __ lduw(base, offset, to_reg->as_register());
          __ decode_klass_not_null(to_reg->as_register());
        } else
#endif
        {
          __ ld_ptr(base, offset, to_reg->as_register());
        }
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ lduw(base, offset, to_reg->as_register());
            __ decode_heap_oop(to_reg->as_register());
          } else {
            __ ld_ptr(base, offset, to_reg->as_register());
          }
          break;
        }
      case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
      case T_DOUBLE:
        {
          FloatRegister reg = to_reg->as_double_reg();
          // split unaligned loads
          if (unaligned || PatchALot) {
            __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
            __ ldf(FloatRegisterImpl::S, base, offset,     reg);
          } else {
            __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
          }
          break;
        }
      default : ShouldNotReachHere();
    }
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(to_reg->as_register());
    }
  }
  return load_offset;
}


int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
  int load_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ ldsb(base, disp, to_reg->as_register()); break;
    case T_CHAR  : __ lduh(base, disp, to_reg->as_register()); break;
    case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
    case T_INT   : __ ld(base, disp, to_reg->as_register()); break;
    case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ lduw(base, disp, to_reg->as_register());
          __ decode_heap_oop(to_reg->as_register());
        } else {
          __ ld_ptr(base, disp, to_reg->as_register());
        }
        break;
      }
    case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
    case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
    case T_LONG  :
#ifdef _LP64
      __ ldx(base, disp, to_reg->as_register_lo());
#else
      assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
             "must be sequential");
      __ ldd(base, disp, to_reg->as_register_hi());
#endif
      break;
    default : ShouldNotReachHere();
  }
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(to_reg->as_register());
  }
  return load_offset;
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_ADDRESS: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_OBJECT: {
      Register src_reg = O7;
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());

      Register tmp = O7;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = G0;
      } else {
        __ set(value_lo, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = G0;
      } else {
        __ set(value_hi, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes);
      break;
    }
    default:
      Unimplemented();
  }
}


void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();
  int offset = -1;

  switch (c->type()) {
    case T_INT:
    case T_FLOAT:
    case T_ADDRESS: {
      LIR_Opr tmp = FrameMap::O7_opr;
      int value = c->as_jint_bits();
      if (value == 0) {
        tmp = FrameMap::G0_opr;
      } else if (Assembler::is_simm13(value)) {
        __ set(value, O7);
      }
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        offset = store(tmp, base, addr->disp(), type, wide, false);
      }
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      assert(!addr->index()->is_valid(), "can't handle reg reg address here");
      assert(Assembler::is_simm13(addr->disp()) &&
             Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");

      LIR_Opr tmp = FrameMap::O7_opr;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = FrameMap::G0_opr;
      } else {
        __ set(value_lo, O7);
      }
      offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = FrameMap::G0_opr;
      } else {
        __ set(value_hi, O7);
      }
      store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false);
      break;
    }
    case T_OBJECT: {
      jobject obj = c->as_jobject();
      LIR_Opr tmp;
      if (obj == NULL) {
        tmp = FrameMap::G0_opr;
      } else {
        tmp = FrameMap::O7_opr;
        jobject2reg(c->as_jobject(), O7);
      }
      // handle either reg+reg or reg+disp address
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        offset = store(tmp, base, addr->disp(), type, wide, false);
      }

      break;
    }
    default:
      Unimplemented();
  }
  if (info != NULL) {
    assert(offset != -1, "offset should've been set");
    add_debug_info_for_null_check(offset, info);
  }
}
duke@435 1192
duke@435 1193
duke@435 1194 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
duke@435 1195 LIR_Const* c = src->as_constant_ptr();
duke@435 1196 LIR_Opr to_reg = dest;
duke@435 1197
duke@435 1198 switch (c->type()) {
duke@435 1199 case T_INT:
roland@1732 1200 case T_ADDRESS:
duke@435 1201 {
duke@435 1202 jint con = c->as_jint();
duke@435 1203 if (to_reg->is_single_cpu()) {
duke@435 1204 assert(patch_code == lir_patch_none, "no patching handled here");
duke@435 1205 __ set(con, to_reg->as_register());
duke@435 1206 } else {
duke@435 1207 ShouldNotReachHere();
duke@435 1208 assert(to_reg->is_single_fpu(), "wrong register kind");
duke@435 1209
duke@435 1210 __ set(con, O7);
twisti@1162 1211 Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
duke@435 1212 __ st(O7, temp_slot);
duke@435 1213 __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
duke@435 1214 }
duke@435 1215 }
duke@435 1216 break;
duke@435 1217
duke@435 1218 case T_LONG:
duke@435 1219 {
duke@435 1220 jlong con = c->as_jlong();
duke@435 1221
duke@435 1222 if (to_reg->is_double_cpu()) {
duke@435 1223 #ifdef _LP64
duke@435 1224 __ set(con, to_reg->as_register_lo());
duke@435 1225 #else
duke@435 1226 __ set(low(con), to_reg->as_register_lo());
duke@435 1227 __ set(high(con), to_reg->as_register_hi());
duke@435 1228 #endif
duke@435 1229 #ifdef _LP64
duke@435 1230 } else if (to_reg->is_single_cpu()) {
duke@435 1231 __ set(con, to_reg->as_register());
duke@435 1232 #endif
duke@435 1233 } else {
duke@435 1234 ShouldNotReachHere();
duke@435 1235 assert(to_reg->is_double_fpu(), "wrong register kind");
twisti@1162 1236 Address temp_slot_lo(SP, ((frame::register_save_words ) * wordSize) + STACK_BIAS);
twisti@1162 1237 Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
duke@435 1238 __ set(low(con), O7);
duke@435 1239 __ st(O7, temp_slot_lo);
duke@435 1240 __ set(high(con), O7);
duke@435 1241 __ st(O7, temp_slot_hi);
duke@435 1242 __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg());
duke@435 1243 }
duke@435 1244 }
duke@435 1245 break;
duke@435 1246
duke@435 1247 case T_OBJECT:
duke@435 1248 {
duke@435 1249 if (patch_code == lir_patch_none) {
duke@435 1250 jobject2reg(c->as_jobject(), to_reg->as_register());
duke@435 1251 } else {
duke@435 1252 jobject2reg_with_patching(to_reg->as_register(), info);
duke@435 1253 }
duke@435 1254 }
duke@435 1255 break;
duke@435 1256
coleenp@4037 1257 case T_METADATA:
coleenp@4037 1258 {
coleenp@4037 1259 if (patch_code == lir_patch_none) {
coleenp@4037 1260 metadata2reg(c->as_metadata(), to_reg->as_register());
coleenp@4037 1261 } else {
coleenp@4037 1262 klass2reg_with_patching(to_reg->as_register(), info);
coleenp@4037 1263 }
coleenp@4037 1264 }
coleenp@4037 1265 break;
coleenp@4037 1266
duke@435 1267 case T_FLOAT:
duke@435 1268 {
duke@435 1269 address const_addr = __ float_constant(c->as_jfloat());
duke@435 1270 if (const_addr == NULL) {
duke@435 1271 bailout("const section overflow");
duke@435 1272 break;
duke@435 1273 }
duke@435 1274 RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
twisti@1162 1275 AddressLiteral const_addrlit(const_addr, rspec);
duke@435 1276 if (to_reg->is_single_fpu()) {
twisti@1162 1277 __ patchable_sethi(const_addrlit, O7);
duke@435 1278 __ relocate(rspec);
twisti@1162 1279 __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());
duke@435 1280
duke@435 1281 } else {
duke@435 1282 assert(to_reg->is_single_cpu(), "Must be a cpu register.");
duke@435 1283
twisti@1162 1284 __ set(const_addrlit, O7);
iveresov@2344 1285 __ ld(O7, 0, to_reg->as_register());
duke@435 1286 }
duke@435 1287 }
duke@435 1288 break;
duke@435 1289
duke@435 1290 case T_DOUBLE:
duke@435 1291 {
duke@435 1292 address const_addr = __ double_constant(c->as_jdouble());
duke@435 1293 if (const_addr == NULL) {
duke@435 1294 bailout("const section overflow");
duke@435 1295 break;
duke@435 1296 }
duke@435 1297 RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
duke@435 1298
duke@435 1299 if (to_reg->is_double_fpu()) {
twisti@1162 1300 AddressLiteral const_addrlit(const_addr, rspec);
twisti@1162 1301 __ patchable_sethi(const_addrlit, O7);
duke@435 1302 __ relocate(rspec);
twisti@1162 1303 __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
duke@435 1304 } else {
duke@435 1305 assert(to_reg->is_double_cpu(), "Must be a long register.");
duke@435 1306 #ifdef _LP64
duke@435 1307 __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
duke@435 1308 #else
duke@435 1309 __ set(low(jlong_cast(c->as_jdouble())), to_reg->as_register_lo());
duke@435 1310 __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
duke@435 1311 #endif
duke@435 1312 }
duke@435 1313
duke@435 1314 }
duke@435 1315 break;
duke@435 1316
duke@435 1317 default:
duke@435 1318 ShouldNotReachHere();
duke@435 1319 }
duke@435 1320 }
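// A sketch of what the T_FLOAT path of const2reg above emits (registers are
// chosen by the allocator; shown here with O7 as the scratch and F0 as the
// destination, not emitted verbatim):
//   sethi %hi(const_addr), %o7         ! patchable, with an internal_word reloc
//   ldf   [%o7 + %lo(const_addr)], %f0
// const_addr points into the method's constant section; if that section is
// full, float_constant() returns NULL and the compilation bails out.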
duke@435 1321
duke@435 1322 Address LIR_Assembler::as_Address(LIR_Address* addr) {
roland@6223 1323 Register reg = addr->base()->as_pointer_register();
roland@4106 1324 LIR_Opr index = addr->index();
roland@4106 1325 if (index->is_illegal()) {
roland@4106 1326 return Address(reg, addr->disp());
roland@4106 1327 } else {
roland@4106 1328 assert (addr->disp() == 0, "unsupported address mode");
roland@4106 1329 return Address(reg, index->as_pointer_register());
roland@4106 1330 }
duke@435 1331 }
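// Illustration (not emitted code): as_Address above maps the two LIR shapes
// onto the two hardware addressing modes, since SPARC has no
// [base + index + disp] form:
//   LIR_Address(base=O3, disp=12)   ->  [%o3 + 12]
//   LIR_Address(base=O3, index=O4)  ->  [%o3 + %o4]   (disp must be 0)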
duke@435 1332
duke@435 1333
duke@435 1334 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
duke@435 1335 switch (type) {
duke@435 1336 case T_INT:
duke@435 1337 case T_FLOAT: {
duke@435 1338 Register tmp = O7;
duke@435 1339 Address from = frame_map()->address_for_slot(src->single_stack_ix());
duke@435 1340 Address to = frame_map()->address_for_slot(dest->single_stack_ix());
duke@435 1341 __ lduw(from.base(), from.disp(), tmp);
duke@435 1342 __ stw(tmp, to.base(), to.disp());
duke@435 1343 break;
duke@435 1344 }
duke@435 1345 case T_OBJECT: {
duke@435 1346 Register tmp = O7;
duke@435 1347 Address from = frame_map()->address_for_slot(src->single_stack_ix());
duke@435 1348 Address to = frame_map()->address_for_slot(dest->single_stack_ix());
duke@435 1349 __ ld_ptr(from.base(), from.disp(), tmp);
duke@435 1350 __ st_ptr(tmp, to.base(), to.disp());
duke@435 1351 break;
duke@435 1352 }
duke@435 1353 case T_LONG:
duke@435 1354 case T_DOUBLE: {
duke@435 1355 Register tmp = O7;
duke@435 1356 Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
duke@435 1357 Address to = frame_map()->address_for_double_slot(dest->double_stack_ix());
duke@435 1358 __ lduw(from.base(), from.disp(), tmp);
duke@435 1359 __ stw(tmp, to.base(), to.disp());
duke@435 1360 __ lduw(from.base(), from.disp() + 4, tmp);
duke@435 1361 __ stw(tmp, to.base(), to.disp() + 4);
duke@435 1362 break;
duke@435 1363 }
duke@435 1364
duke@435 1365 default:
duke@435 1366 ShouldNotReachHere();
duke@435 1367 }
duke@435 1368 }
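// Note on the T_LONG/T_DOUBLE case of stack2stack above: the copy goes
// through O7 as two 32-bit halves, schematically
//   lduw [from], %o7   ;  stw %o7, [to]
//   lduw [from+4], %o7 ;  stw %o7, [to+4]
// which stays correct whether or not the double-word slot is 8-byte aligned.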
duke@435 1369
duke@435 1370
duke@435 1371 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
duke@435 1372 Address base = as_Address(addr);
twisti@1162 1373 return Address(base.base(), base.disp() + hi_word_offset_in_bytes);
duke@435 1374 }
duke@435 1375
duke@435 1376
duke@435 1377 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
duke@435 1378 Address base = as_Address(addr);
twisti@1162 1379 return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
duke@435 1380 }
duke@435 1381
duke@435 1382
duke@435 1383 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
iveresov@2344 1384 LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {
duke@435 1385
roland@4051 1386 assert(type != T_METADATA, "load of metadata ptr not supported");
duke@435 1387 LIR_Address* addr = src_opr->as_address_ptr();
duke@435 1388 LIR_Opr to_reg = dest;
duke@435 1389
duke@435 1390 Register src = addr->base()->as_pointer_register();
duke@435 1391 Register disp_reg = noreg;
duke@435 1392 int disp_value = addr->disp();
duke@435 1393 bool needs_patching = (patch_code != lir_patch_none);
duke@435 1394
duke@435 1395 if (addr->base()->type() == T_OBJECT) {
duke@435 1396 __ verify_oop(src);
duke@435 1397 }
duke@435 1398
duke@435 1399 PatchingStub* patch = NULL;
duke@435 1400 if (needs_patching) {
duke@435 1401 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
duke@435 1402 assert(!to_reg->is_double_cpu() ||
duke@435 1403 patch_code == lir_patch_none ||
duke@435 1404 patch_code == lir_patch_normal, "patching doesn't match register");
duke@435 1405 }
duke@435 1406
duke@435 1407 if (addr->index()->is_illegal()) {
duke@435 1408 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
duke@435 1409 if (needs_patching) {
twisti@1162 1410 __ patchable_set(0, O7);
duke@435 1411 } else {
duke@435 1412 __ set(disp_value, O7);
duke@435 1413 }
duke@435 1414 disp_reg = O7;
duke@435 1415 }
duke@435 1416 } else if (unaligned || PatchALot) {
duke@435 1417 __ add(src, addr->index()->as_register(), O7);
duke@435 1418 src = O7;
duke@435 1419 } else {
duke@435 1420 disp_reg = addr->index()->as_pointer_register();
duke@435 1421 assert(disp_value == 0, "can't handle 3 operand addresses");
duke@435 1422 }
duke@435 1423
duke@435 1424 // remember the offset of the load. The patching_epilog must be done
duke@435 1425 // before the call to add_debug_info, otherwise the PcDescs don't get
duke@435 1426 // entered in increasing order.
duke@435 1427 int offset = code_offset();
duke@435 1428
duke@435 1429 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
duke@435 1430 if (disp_reg == noreg) {
iveresov@2344 1431 offset = load(src, disp_value, to_reg, type, wide, unaligned);
duke@435 1432 } else {
duke@435 1433 assert(!unaligned, "can't handle this");
iveresov@2344 1434 offset = load(src, disp_reg, to_reg, type, wide);
duke@435 1435 }
duke@435 1436
duke@435 1437 if (patch != NULL) {
duke@435 1438 patching_epilog(patch, patch_code, src, info);
duke@435 1439 }
duke@435 1440 if (info != NULL) add_debug_info_for_null_check(offset, info);
duke@435 1441 }
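// Simplified example of the displacement handling in mem2reg above: for a
// load whose displacement does not fit in 13 signed bits, the shape becomes
//   set  disp, %o7           ! sethi/or pair materializing the offset
//   ld   [src + %o7], dst    ! reg+reg form of the load
// whereas a patched field access reserves the full-width form up front via
// patchable_set(0, O7) so the real offset can be written in later.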
duke@435 1442
duke@435 1443
duke@435 1444 void LIR_Assembler::prefetchr(LIR_Opr src) {
duke@435 1445 LIR_Address* addr = src->as_address_ptr();
duke@435 1446 Address from_addr = as_Address(addr);
duke@435 1447
duke@435 1448 if (VM_Version::has_v9()) {
duke@435 1449 __ prefetch(from_addr, Assembler::severalReads);
duke@435 1450 }
duke@435 1451 }
duke@435 1452
duke@435 1453
duke@435 1454 void LIR_Assembler::prefetchw(LIR_Opr src) {
duke@435 1455 LIR_Address* addr = src->as_address_ptr();
duke@435 1456 Address from_addr = as_Address(addr);
duke@435 1457
duke@435 1458 if (VM_Version::has_v9()) {
duke@435 1459 __ prefetch(from_addr, Assembler::severalWritesAndPossiblyReads);
duke@435 1460 }
duke@435 1461 }
duke@435 1462
duke@435 1463
duke@435 1464 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
duke@435 1465 Address addr;
duke@435 1466 if (src->is_single_word()) {
duke@435 1467 addr = frame_map()->address_for_slot(src->single_stack_ix());
duke@435 1468 } else if (src->is_double_word()) {
duke@435 1469 addr = frame_map()->address_for_double_slot(src->double_stack_ix());
duke@435 1470 }
duke@435 1471
duke@435 1472 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
iveresov@2344 1473 load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
duke@435 1474 }
duke@435 1475
duke@435 1476
duke@435 1477 void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
duke@435 1478 Address addr;
duke@435 1479 if (dest->is_single_word()) {
duke@435 1480 addr = frame_map()->address_for_slot(dest->single_stack_ix());
duke@435 1481 } else if (dest->is_double_word()) {
duke@435 1482 addr = frame_map()->address_for_slot(dest->double_stack_ix());
duke@435 1483 }
duke@435 1484 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
iveresov@2344 1485 store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
duke@435 1486 }
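// The unaligned test in stack2reg and reg2stack above accounts for the
// SPARC v9 stack bias: the memory address of a slot is %sp + disp with %sp
// biased by STACK_BIAS, so a double-word slot is 8-byte aligned exactly when
// (disp - STACK_BIAS) is a multiple of 8. For example, with STACK_BIAS of
// 0x7ff, disp == 0x807 gives (0x807 - 0x7ff) % 8 == 0, i.e. an aligned slot.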
duke@435 1487
duke@435 1488
duke@435 1489 void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
duke@435 1490 if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
duke@435 1491 if (from_reg->is_double_fpu()) {
duke@435 1492 // double to double moves
duke@435 1493 assert(to_reg->is_double_fpu(), "should match");
duke@435 1494 __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg());
duke@435 1495 } else {
duke@435 1496 // float to float moves
duke@435 1497 assert(to_reg->is_single_fpu(), "should match");
duke@435 1498 __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg());
duke@435 1499 }
duke@435 1500 } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
duke@435 1501 if (from_reg->is_double_cpu()) {
duke@435 1502 #ifdef _LP64
duke@435 1503 __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
duke@435 1504 #else
duke@435 1505 assert(to_reg->is_double_cpu() &&
duke@435 1506 from_reg->as_register_hi() != to_reg->as_register_lo() &&
duke@435 1507 from_reg->as_register_lo() != to_reg->as_register_hi(),
duke@435 1508 "should both be long and not overlap");
duke@435 1509 // long to long moves
duke@435 1510 __ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
duke@435 1511 __ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
duke@435 1512 #endif
duke@435 1513 #ifdef _LP64
duke@435 1514 } else if (to_reg->is_double_cpu()) {
duke@435 1515 // int to int moves
duke@435 1516 __ mov(from_reg->as_register(), to_reg->as_register_lo());
duke@435 1517 #endif
duke@435 1518 } else {
duke@435 1519 // int to int moves
duke@435 1520 __ mov(from_reg->as_register(), to_reg->as_register());
duke@435 1521 }
duke@435 1522 } else {
duke@435 1523 ShouldNotReachHere();
duke@435 1524 }
duke@435 1525 if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
duke@435 1526 __ verify_oop(to_reg->as_register());
duke@435 1527 }
duke@435 1528 }
duke@435 1529
duke@435 1530
duke@435 1531 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
duke@435 1532 LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
iveresov@2344 1533 bool wide, bool unaligned) {
roland@4051 1534 assert(type != T_METADATA, "store of metadata ptr not supported");
duke@435 1535 LIR_Address* addr = dest->as_address_ptr();
duke@435 1536
duke@435 1537 Register src = addr->base()->as_pointer_register();
duke@435 1538 Register disp_reg = noreg;
duke@435 1539 int disp_value = addr->disp();
duke@435 1540 bool needs_patching = (patch_code != lir_patch_none);
duke@435 1541
duke@435 1542 if (addr->base()->is_oop_register()) {
duke@435 1543 __ verify_oop(src);
duke@435 1544 }
duke@435 1545
duke@435 1546 PatchingStub* patch = NULL;
duke@435 1547 if (needs_patching) {
duke@435 1548 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
duke@435 1549 assert(!from_reg->is_double_cpu() ||
duke@435 1550 patch_code == lir_patch_none ||
duke@435 1551 patch_code == lir_patch_normal, "patching doesn't match register");
duke@435 1552 }
duke@435 1553
duke@435 1554 if (addr->index()->is_illegal()) {
duke@435 1555 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
duke@435 1556 if (needs_patching) {
twisti@1162 1557 __ patchable_set(0, O7);
duke@435 1558 } else {
duke@435 1559 __ set(disp_value, O7);
duke@435 1560 }
duke@435 1561 disp_reg = O7;
duke@435 1562 }
duke@435 1563 } else if (unaligned || PatchALot) {
duke@435 1564 __ add(src, addr->index()->as_register(), O7);
duke@435 1565 src = O7;
duke@435 1566 } else {
duke@435 1567 disp_reg = addr->index()->as_pointer_register();
duke@435 1568 assert(disp_value == 0, "can't handle 3 operand addresses");
duke@435 1569 }
duke@435 1570
duke@435 1571 // remember the offset of the store. The patching_epilog must be done
duke@435 1572 // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
duke@435 1573 // entered in increasing order.
duke@435 1574 int offset;
duke@435 1575
duke@435 1576 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
duke@435 1577 if (disp_reg == noreg) {
iveresov@2344 1578 offset = store(from_reg, src, disp_value, type, wide, unaligned);
duke@435 1579 } else {
duke@435 1580 assert(!unaligned, "can't handle this");
iveresov@2344 1581 offset = store(from_reg, src, disp_reg, type, wide);
duke@435 1582 }
duke@435 1583
duke@435 1584 if (patch != NULL) {
duke@435 1585 patching_epilog(patch, patch_code, src, info);
duke@435 1586 }
duke@435 1587
duke@435 1588 if (info != NULL) add_debug_info_for_null_check(offset, info);
duke@435 1589 }
duke@435 1590
duke@435 1591
duke@435 1592 void LIR_Assembler::return_op(LIR_Opr result) {
duke@435 1593 // the poll may need a register so just pick one that isn't the return register
iveresov@2138 1594 #if defined(TIERED) && !defined(_LP64)
duke@435 1595 if (result->type_field() == LIR_OprDesc::long_type) {
duke@435 1596 // Must move the result to G1
duke@435 1597 // Must leave proper result in O0,O1 and G1 (TIERED only)
duke@435 1598 __ sllx(I0, 32, G1); // Shift bits into high G1
duke@435 1599 __ srl (I1, 0, I1); // Zero extend I1 (it becomes O1 after the restore)
duke@435 1600 __ or3 (I1, G1, G1); // OR 64 bits into G1
iveresov@2138 1601 #ifdef ASSERT
iveresov@2138 1602 // mangle it so any problems will show up
iveresov@2138 1603 __ set(0xdeadbeef, I0);
iveresov@2138 1604 __ set(0xdeadbeef, I1);
iveresov@2138 1605 #endif
duke@435 1606 }
duke@435 1607 #endif // TIERED
duke@435 1608 __ set((intptr_t)os::get_polling_page(), L0);
duke@435 1609 __ relocate(relocInfo::poll_return_type);
duke@435 1610 __ ld_ptr(L0, 0, G0);
duke@435 1611 __ ret();
duke@435 1612 __ delayed()->restore();
duke@435 1613 }
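// Schematically, the epilogue emitted by return_op above is:
//   set   polling_page, %l0
//   ld    [%l0], %g0       ! poll read; faults when the VM protects the page
//   ret
//   restore                ! delay slot: pop the register window
// The poll read carries a poll_return relocation so the signal handler can
// recognize the faulting instruction as a return-time safepoint poll.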
duke@435 1614
duke@435 1615
duke@435 1616 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
duke@435 1617 __ set((intptr_t)os::get_polling_page(), tmp->as_register());
duke@435 1618 if (info != NULL) {
duke@435 1619 add_debug_info_for_branch(info);
duke@435 1620 } else {
duke@435 1621 __ relocate(relocInfo::poll_type);
duke@435 1622 }
duke@435 1623
duke@435 1624 int offset = __ offset();
duke@435 1625 __ ld_ptr(tmp->as_register(), 0, G0);
duke@435 1626
duke@435 1627 return offset;
duke@435 1628 }
duke@435 1629
duke@435 1630
duke@435 1631 void LIR_Assembler::emit_static_call_stub() {
duke@435 1632 address call_pc = __ pc();
duke@435 1633 address stub = __ start_a_stub(call_stub_size);
duke@435 1634 if (stub == NULL) {
duke@435 1635 bailout("static call stub overflow");
duke@435 1636 return;
duke@435 1637 }
duke@435 1638
duke@435 1639 int start = __ offset();
duke@435 1640 __ relocate(static_stub_Relocation::spec(call_pc));
duke@435 1641
coleenp@4037 1642 __ set_metadata(NULL, G5);
duke@435 1643 // must be set to -1 at code generation time
twisti@1162 1644 AddressLiteral addrlit(-1);
twisti@1162 1645 __ jump_to(addrlit, G3);
duke@435 1646 __ delayed()->nop();
duke@435 1647
duke@435 1648 assert(__ offset() - start <= call_stub_size, "stub too big");
duke@435 1649 __ end_a_stub();
duke@435 1650 }
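// Rough layout of the stub laid down above (both fields are placeholders
// that get patched when the call site is resolved):
//   set_metadata NULL, %g5    ! patched with the callee Method*
//   jump_to      -1,   %g3    ! patched with the real entry point
//   nop                       ! delay slot
// The static_stub relocation recorded first ties this stub back to its call
// instruction at call_pc.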
duke@435 1651
duke@435 1652
duke@435 1653 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
duke@435 1654 if (opr1->is_single_fpu()) {
duke@435 1655 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg());
duke@435 1656 } else if (opr1->is_double_fpu()) {
duke@435 1657 __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg());
duke@435 1658 } else if (opr1->is_single_cpu()) {
duke@435 1659 if (opr2->is_constant()) {
duke@435 1660 switch (opr2->as_constant_ptr()->type()) {
duke@435 1661 case T_INT:
duke@435 1662 { jint con = opr2->as_constant_ptr()->as_jint();
duke@435 1663 if (Assembler::is_simm13(con)) {
duke@435 1664 __ cmp(opr1->as_register(), con);
duke@435 1665 } else {
duke@435 1666 __ set(con, O7);
duke@435 1667 __ cmp(opr1->as_register(), O7);
duke@435 1668 }
duke@435 1669 }
duke@435 1670 break;
duke@435 1671
duke@435 1672 case T_OBJECT:
duke@435 1673 // there are only equal/notequal comparisons on objects
duke@435 1674 { jobject con = opr2->as_constant_ptr()->as_jobject();
duke@435 1675 if (con == NULL) {
duke@435 1676 __ cmp(opr1->as_register(), 0);
duke@435 1677 } else {
duke@435 1678 jobject2reg(con, O7);
duke@435 1679 __ cmp(opr1->as_register(), O7);
duke@435 1680 }
duke@435 1681 }
duke@435 1682 break;
duke@435 1683
duke@435 1684 default:
duke@435 1685 ShouldNotReachHere();
duke@435 1686 break;
duke@435 1687 }
duke@435 1688 } else {
duke@435 1689 if (opr2->is_address()) {
duke@435 1690 LIR_Address * addr = opr2->as_address_ptr();
duke@435 1691 BasicType type = addr->type();
duke@435 1692 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
duke@435 1693 else __ ld(as_Address(addr), O7);
duke@435 1694 __ cmp(opr1->as_register(), O7);
duke@435 1695 } else {
duke@435 1696 __ cmp(opr1->as_register(), opr2->as_register());
duke@435 1697 }
duke@435 1698 }
duke@435 1699 } else if (opr1->is_double_cpu()) {
duke@435 1700 Register xlo = opr1->as_register_lo();
duke@435 1701 Register xhi = opr1->as_register_hi();
duke@435 1702 if (opr2->is_constant() && opr2->as_jlong() == 0) {
duke@435 1703 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
duke@435 1704 #ifdef _LP64
duke@435 1705 __ orcc(xhi, G0, G0);
duke@435 1706 #else
duke@435 1707 __ orcc(xhi, xlo, G0);
duke@435 1708 #endif
duke@435 1709 } else if (opr2->is_register()) {
duke@435 1710 Register ylo = opr2->as_register_lo();
duke@435 1711 Register yhi = opr2->as_register_hi();
duke@435 1712 #ifdef _LP64
duke@435 1713 __ cmp(xlo, ylo);
duke@435 1714 #else
duke@435 1715 __ subcc(xlo, ylo, xlo);
duke@435 1716 __ subccc(xhi, yhi, xhi);
duke@435 1717 if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
duke@435 1718 __ orcc(xhi, xlo, G0);
duke@435 1719 }
duke@435 1720 #endif
duke@435 1721 } else {
duke@435 1722 ShouldNotReachHere();
duke@435 1723 }
duke@435 1724 } else if (opr1->is_address()) {
duke@435 1725 LIR_Address * addr = opr1->as_address_ptr();
duke@435 1726 BasicType type = addr->type();
duke@435 1727 assert (opr2->is_constant(), "the right operand must be a constant here");
duke@435 1728 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
duke@435 1729 else __ ld(as_Address(addr), O7);
duke@435 1730 __ cmp(O7, opr2->as_constant_ptr()->as_jint());
duke@435 1731 } else {
duke@435 1732 ShouldNotReachHere();
duke@435 1733 }
duke@435 1734 }
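// Example for the T_INT constant path of comp_op above: the immediate form
// of cmp is only legal for 13-bit signed constants, so
//   cmp  %i0, 100            ! 100 fits in simm13
// while a larger constant is routed through O7 first:
//   set  0x12345, %o7
//   cmp  %i0, %o7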
duke@435 1735
duke@435 1736
duke@435 1737 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
duke@435 1738 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
duke@435 1739 bool is_unordered_less = (code == lir_ucmp_fd2i);
duke@435 1740 if (left->is_single_fpu()) {
duke@435 1741 __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
duke@435 1742 } else if (left->is_double_fpu()) {
duke@435 1743 __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
duke@435 1744 } else {
duke@435 1745 ShouldNotReachHere();
duke@435 1746 }
duke@435 1747 } else if (code == lir_cmp_l2i) {
iveresov@1804 1748 #ifdef _LP64
iveresov@1804 1749 __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
iveresov@1804 1750 #else
duke@435 1751 __ lcmp(left->as_register_hi(), left->as_register_lo(),
duke@435 1752 right->as_register_hi(), right->as_register_lo(),
duke@435 1753 dst->as_register());
iveresov@1804 1754 #endif
duke@435 1755 } else {
duke@435 1756 ShouldNotReachHere();
duke@435 1757 }
duke@435 1758 }
duke@435 1759
duke@435 1760
iveresov@2412 1761 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
duke@435 1762 Assembler::Condition acond;
duke@435 1763 switch (condition) {
duke@435 1764 case lir_cond_equal: acond = Assembler::equal; break;
duke@435 1765 case lir_cond_notEqual: acond = Assembler::notEqual; break;
duke@435 1766 case lir_cond_less: acond = Assembler::less; break;
duke@435 1767 case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
duke@435 1768 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
duke@435 1769 case lir_cond_greater: acond = Assembler::greater; break;
duke@435 1770 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break;
duke@435 1771 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break;
duke@435 1772 default: ShouldNotReachHere();
duke@435 1773 }
duke@435 1774
duke@435 1775 if (opr1->is_constant() && opr1->type() == T_INT) {
duke@435 1776 Register dest = result->as_register();
duke@435 1777 // load up first part of constant before branch
duke@435 1778 // and do the rest in the delay slot.
duke@435 1779 if (!Assembler::is_simm13(opr1->as_jint())) {
duke@435 1780 __ sethi(opr1->as_jint(), dest);
duke@435 1781 }
duke@435 1782 } else if (opr1->is_constant()) {
duke@435 1783 const2reg(opr1, result, lir_patch_none, NULL);
duke@435 1784 } else if (opr1->is_register()) {
duke@435 1785 reg2reg(opr1, result);
duke@435 1786 } else if (opr1->is_stack()) {
duke@435 1787 stack2reg(opr1, result, result->type());
duke@435 1788 } else {
duke@435 1789 ShouldNotReachHere();
duke@435 1790 }
duke@435 1791 Label skip;
iveresov@2412 1792 #ifdef _LP64
iveresov@2412 1793 if (type == T_INT) {
iveresov@2412 1794 __ br(acond, false, Assembler::pt, skip);
iveresov@2412 1795 } else
iveresov@2412 1796 #endif
iveresov@2412 1797 __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
duke@435 1798 if (opr1->is_constant() && opr1->type() == T_INT) {
duke@435 1799 Register dest = result->as_register();
duke@435 1800 if (Assembler::is_simm13(opr1->as_jint())) {
duke@435 1801 __ delayed()->or3(G0, opr1->as_jint(), dest);
duke@435 1802 } else {
duke@435 1803 // the sethi has been done above, so just put in the low 10 bits
duke@435 1804 __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest);
duke@435 1805 }
duke@435 1806 } else {
duke@435 1807 // can't do anything useful in the delay slot
duke@435 1808 __ delayed()->nop();
duke@435 1809 }
duke@435 1810 if (opr2->is_constant()) {
duke@435 1811 const2reg(opr2, result, lir_patch_none, NULL);
duke@435 1812 } else if (opr2->is_register()) {
duke@435 1813 reg2reg(opr2, result);
duke@435 1814 } else if (opr2->is_stack()) {
duke@435 1815 stack2reg(opr2, result, result->type());
duke@435 1816 } else {
duke@435 1817 ShouldNotReachHere();
duke@435 1818 }
duke@435 1819 __ bind(skip);
duke@435 1820 }
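// The T_INT constant special case in cmove above exists to make use of the
// branch delay slot. For a constant needing a sethi/or3 pair, the emitted
// shape is roughly
//   sethi %hi(con), dst
//   br<cond> pt, skip
//    or3  dst, %lo(con), dst   ! delay slot, executes on both paths
// so the "condition true" value is completed whether or not the branch is
// taken; the fall-through path then simply overwrites dst with opr2.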
duke@435 1821
duke@435 1822
duke@435 1823 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
duke@435 1824 assert(info == NULL, "unused on this code path");
duke@435 1825 assert(left->is_register(), "wrong items state");
duke@435 1826 assert(dest->is_register(), "wrong items state");
duke@435 1827
duke@435 1828 if (right->is_register()) {
duke@435 1829 if (dest->is_float_kind()) {
duke@435 1830
duke@435 1831 FloatRegister lreg, rreg, res;
duke@435 1832 FloatRegisterImpl::Width w;
duke@435 1833 if (right->is_single_fpu()) {
duke@435 1834 w = FloatRegisterImpl::S;
duke@435 1835 lreg = left->as_float_reg();
duke@435 1836 rreg = right->as_float_reg();
duke@435 1837 res = dest->as_float_reg();
duke@435 1838 } else {
duke@435 1839 w = FloatRegisterImpl::D;
duke@435 1840 lreg = left->as_double_reg();
duke@435 1841 rreg = right->as_double_reg();
duke@435 1842 res = dest->as_double_reg();
duke@435 1843 }
duke@435 1844
duke@435 1845 switch (code) {
duke@435 1846 case lir_add: __ fadd(w, lreg, rreg, res); break;
duke@435 1847 case lir_sub: __ fsub(w, lreg, rreg, res); break;
duke@435 1848 case lir_mul: // fall through
duke@435 1849 case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break;
duke@435 1850 case lir_div: // fall through
duke@435 1851 case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break;
duke@435 1852 default: ShouldNotReachHere();
duke@435 1853 }
duke@435 1854
duke@435 1855 } else if (dest->is_double_cpu()) {
duke@435 1856 #ifdef _LP64
duke@435 1857 Register dst_lo = dest->as_register_lo();
duke@435 1858 Register op1_lo = left->as_pointer_register();
duke@435 1859 Register op2_lo = right->as_pointer_register();
duke@435 1860
duke@435 1861 switch (code) {
duke@435 1862 case lir_add:
duke@435 1863 __ add(op1_lo, op2_lo, dst_lo);
duke@435 1864 break;
duke@435 1865
duke@435 1866 case lir_sub:
duke@435 1867 __ sub(op1_lo, op2_lo, dst_lo);
duke@435 1868 break;
duke@435 1869
duke@435 1870 default: ShouldNotReachHere();
duke@435 1871 }
duke@435 1872 #else
duke@435 1873 Register op1_lo = left->as_register_lo();
duke@435 1874 Register op1_hi = left->as_register_hi();
duke@435 1875 Register op2_lo = right->as_register_lo();
duke@435 1876 Register op2_hi = right->as_register_hi();
duke@435 1877 Register dst_lo = dest->as_register_lo();
duke@435 1878 Register dst_hi = dest->as_register_hi();
duke@435 1879
duke@435 1880 switch (code) {
duke@435 1881 case lir_add:
duke@435 1882 __ addcc(op1_lo, op2_lo, dst_lo);
duke@435 1883 __ addc (op1_hi, op2_hi, dst_hi);
duke@435 1884 break;
duke@435 1885
duke@435 1886 case lir_sub:
duke@435 1887 __ subcc(op1_lo, op2_lo, dst_lo);
duke@435 1888 __ subc (op1_hi, op2_hi, dst_hi);
duke@435 1889 break;
duke@435 1890
duke@435 1891 default: ShouldNotReachHere();
duke@435 1892 }
duke@435 1893 #endif
duke@435 1894 } else {
duke@435 1895 assert (right->is_single_cpu(), "right operand must be a single cpu register");
duke@435 1896
duke@435 1897 Register lreg = left->as_register();
duke@435 1898 Register res = dest->as_register();
duke@435 1899 Register rreg = right->as_register();
duke@435 1900 switch (code) {
duke@435 1901 case lir_add: __ add (lreg, rreg, res); break;
duke@435 1902 case lir_sub: __ sub (lreg, rreg, res); break;
morris@5283 1903 case lir_mul: __ mulx (lreg, rreg, res); break;
duke@435 1904 default: ShouldNotReachHere();
duke@435 1905 }
duke@435 1906 }
duke@435 1907 } else {
duke@435 1908 assert (right->is_constant(), "must be constant");
duke@435 1909
duke@435 1910 if (dest->is_single_cpu()) {
duke@435 1911 Register lreg = left->as_register();
duke@435 1912 Register res = dest->as_register();
duke@435 1913 int simm13 = right->as_constant_ptr()->as_jint();
duke@435 1914
duke@435 1915 switch (code) {
duke@435 1916 case lir_add: __ add (lreg, simm13, res); break;
duke@435 1917 case lir_sub: __ sub (lreg, simm13, res); break;
morris@5283 1918 case lir_mul: __ mulx (lreg, simm13, res); break;
duke@435 1919 default: ShouldNotReachHere();
duke@435 1920 }
duke@435 1921 } else {
duke@435 1922 Register lreg = left->as_pointer_register();
duke@435 1923 Register res = dest->as_register_lo();
duke@435 1924 long con = right->as_constant_ptr()->as_jlong();
duke@435 1925 assert(Assembler::is_simm13(con), "must be simm13");
duke@435 1926
duke@435 1927 switch (code) {
duke@435 1928 case lir_add: __ add (lreg, (int)con, res); break;
duke@435 1929 case lir_sub: __ sub (lreg, (int)con, res); break;
morris@5283 1930 case lir_mul: __ mulx (lreg, (int)con, res); break;
duke@435 1931 default: ShouldNotReachHere();
duke@435 1932 }
duke@435 1933 }
duke@435 1934 }
duke@435 1935 }
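// The constant paths in arith_op above assume the LIR generator only feeds
// constants that fit the 13-bit signed immediate field (asserted for the
// long case), e.g.
//   add %i0, 42, %o0          ! lir_add with a small right operand
// Anything larger is materialized into a register before reaching here.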
duke@435 1936
duke@435 1937
duke@435 1938 void LIR_Assembler::fpop() {
duke@435 1939 // do nothing
duke@435 1940 }
duke@435 1941
duke@435 1942
duke@435 1943 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
duke@435 1944 switch (code) {
duke@435 1945 case lir_sin:
duke@435 1946 case lir_tan:
duke@435 1947 case lir_cos: {
duke@435 1948 assert(thread->is_valid(), "preserve the thread object for performance reasons");
duke@435 1949 assert(dest->as_double_reg() == F0, "the result will be in f0/f1");
duke@435 1950 break;
duke@435 1951 }
duke@435 1952 case lir_sqrt: {
duke@435 1953 assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
duke@435 1954 FloatRegister src_reg = value->as_double_reg();
duke@435 1955 FloatRegister dst_reg = dest->as_double_reg();
duke@435 1956 __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg);
duke@435 1957 break;
duke@435 1958 }
duke@435 1959 case lir_abs: {
duke@435 1960 assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
duke@435 1961 FloatRegister src_reg = value->as_double_reg();
duke@435 1962 FloatRegister dst_reg = dest->as_double_reg();
duke@435 1963 __ fabs(FloatRegisterImpl::D, src_reg, dst_reg);
duke@435 1964 break;
duke@435 1965 }
duke@435 1966 default: {
duke@435 1967 ShouldNotReachHere();
duke@435 1968 break;
duke@435 1969 }
duke@435 1970 }
duke@435 1971 }
duke@435 1972
duke@435 1973
duke@435 1974 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
duke@435 1975 if (right->is_constant()) {
duke@435 1976 if (dest->is_single_cpu()) {
duke@435 1977 int simm13 = right->as_constant_ptr()->as_jint();
duke@435 1978 switch (code) {
duke@435 1979 case lir_logic_and: __ and3 (left->as_register(), simm13, dest->as_register()); break;
duke@435 1980 case lir_logic_or: __ or3 (left->as_register(), simm13, dest->as_register()); break;
duke@435 1981 case lir_logic_xor: __ xor3 (left->as_register(), simm13, dest->as_register()); break;
duke@435 1982 default: ShouldNotReachHere();
duke@435 1983 }
duke@435 1984 } else {
duke@435 1985 long c = right->as_constant_ptr()->as_jlong();
duke@435 1986 assert(c == (int)c && Assembler::is_simm13(c), "out of range");
duke@435 1987 int simm13 = (int)c;
duke@435 1988 switch (code) {
duke@435 1989 case lir_logic_and:
duke@435 1990 #ifndef _LP64
duke@435 1991 __ and3 (left->as_register_hi(), 0, dest->as_register_hi());
duke@435 1992 #endif
duke@435 1993 __ and3 (left->as_register_lo(), simm13, dest->as_register_lo());
duke@435 1994 break;
duke@435 1995
duke@435 1996 case lir_logic_or:
duke@435 1997 #ifndef _LP64
duke@435 1998 __ or3 (left->as_register_hi(), 0, dest->as_register_hi());
duke@435 1999 #endif
duke@435 2000 __ or3 (left->as_register_lo(), simm13, dest->as_register_lo());
duke@435 2001 break;
duke@435 2002
duke@435 2003 case lir_logic_xor:
duke@435 2004 #ifndef _LP64
duke@435 2005 __ xor3 (left->as_register_hi(), 0, dest->as_register_hi());
duke@435 2006 #endif
duke@435 2007 __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo());
duke@435 2008 break;
duke@435 2009
duke@435 2010 default: ShouldNotReachHere();
duke@435 2011 }
duke@435 2012 }
duke@435 2013 } else {
duke@435 2014 assert(right->is_register(), "right should be in register");
duke@435 2015
duke@435 2016 if (dest->is_single_cpu()) {
duke@435 2017 switch (code) {
duke@435 2018 case lir_logic_and: __ and3 (left->as_register(), right->as_register(), dest->as_register()); break;
duke@435 2019 case lir_logic_or: __ or3 (left->as_register(), right->as_register(), dest->as_register()); break;
duke@435 2020 case lir_logic_xor: __ xor3 (left->as_register(), right->as_register(), dest->as_register()); break;
duke@435 2021 default: ShouldNotReachHere();
duke@435 2022 }
duke@435 2023 } else {
duke@435 2024 #ifdef _LP64
duke@435 2025 Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
duke@435 2026 left->as_register_lo();
duke@435 2027 Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
duke@435 2028 right->as_register_lo();
duke@435 2029
duke@435 2030 switch (code) {
duke@435 2031 case lir_logic_and: __ and3 (l, r, dest->as_register_lo()); break;
duke@435 2032 case lir_logic_or: __ or3 (l, r, dest->as_register_lo()); break;
duke@435 2033 case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break;
duke@435 2034 default: ShouldNotReachHere();
duke@435 2035 }
duke@435 2036 #else
duke@435 2037 switch (code) {
duke@435 2038 case lir_logic_and:
duke@435 2039 __ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
duke@435 2040 __ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
duke@435 2041 break;
duke@435 2042
duke@435 2043 case lir_logic_or:
duke@435 2044 __ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
duke@435 2045 __ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
duke@435 2046 break;
duke@435 2047
duke@435 2048 case lir_logic_xor:
duke@435 2049 __ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
duke@435 2050 __ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
duke@435 2051 break;
duke@435 2052
duke@435 2053 default: ShouldNotReachHere();
duke@435 2054 }
duke@435 2055 #endif
duke@435 2056 }
duke@435 2057 }
duke@435 2058 }
duke@435 2059
duke@435 2060
duke@435 2061 int LIR_Assembler::shift_amount(BasicType t) {
kvn@464 2062 int elem_size = type2aelembytes(t);
duke@435 2063 switch (elem_size) {
duke@435 2064 case 1 : return 0;
duke@435 2065 case 2 : return 1;
duke@435 2066 case 4 : return 2;
duke@435 2067 case 8 : return 3;
duke@435 2068 }
duke@435 2069 ShouldNotReachHere();
duke@435 2070 return -1;
duke@435 2071 }
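// shift_amount above is just log2 of the element size, used to scale an
// array index into a byte offset:
//   T_BYTE/T_BOOLEAN -> 0,  T_SHORT/T_CHAR -> 1,
//   T_INT/T_FLOAT    -> 2,  T_LONG/T_DOUBLE -> 3
// so element i of an array lives at base_offset_in_bytes + (i << shift).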
duke@435 2072
duke@435 2073
never@1813 2074 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
duke@435 2075 assert(exceptionOop->as_register() == Oexception, "should match");
never@1813 2076 assert(exceptionPC->as_register() == Oissuing_pc, "should match");
duke@435 2077
duke@435 2078 info->add_register_oop(exceptionOop);
duke@435 2079
never@1813 2080 // reuse the debug info from the safepoint poll for the throw op itself
never@1813 2081 address pc_for_athrow = __ pc();
never@1813 2082 int pc_for_athrow_offset = __ offset();
never@1813 2083 RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
never@1813 2084 __ set(pc_for_athrow, Oissuing_pc, rspec);
never@1813 2085 add_call_info(pc_for_athrow_offset, info); // for exception handler
never@1813 2086
never@1813 2087 __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
never@1813 2088 __ delayed()->nop();
never@1813 2089 }
never@1813 2090
never@1813 2091
never@1813 2092 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
never@1813 2093 assert(exceptionOop->as_register() == Oexception, "should match");
never@1813 2094
never@1813 2095 __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry);
never@1813 2096 __ delayed()->nop();
duke@435 2097 }
duke@435 2098
duke@435 2099 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
duke@435 2100 Register src = op->src()->as_register();
duke@435 2101 Register dst = op->dst()->as_register();
duke@435 2102 Register src_pos = op->src_pos()->as_register();
duke@435 2103 Register dst_pos = op->dst_pos()->as_register();
duke@435 2104 Register length = op->length()->as_register();
duke@435 2105 Register tmp = op->tmp()->as_register();
duke@435 2106 Register tmp2 = O7;
duke@435 2107
duke@435 2108 int flags = op->flags();
duke@435 2109 ciArrayKlass* default_type = op->expected_type();
duke@435 2110 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
duke@435 2111 if (basic_type == T_ARRAY) basic_type = T_OBJECT;
duke@435 2112
iveresov@2731 2113 #ifdef _LP64
iveresov@2731 2114 // the upper 32 bits must be zero: sign-extend the 32-bit position and length arguments
iveresov@2731 2115 __ sra(dst_pos, 0, dst_pos);
iveresov@2731 2116 __ sra(src_pos, 0, src_pos);
iveresov@2731 2117 __ sra(length, 0, length);
iveresov@2731 2118 #endif
iveresov@2731 2119
duke@435 2120 // set up the arraycopy stub information
duke@435 2121 ArrayCopyStub* stub = op->stub();
duke@435 2122
duke@435 2123 // always call the stub if no type information is available; it's ok if
duke@435 2124 // the known type isn't loaded, since the code sanity-checks it in debug
duke@435 2125 // mode, and the type isn't required when we know the exact type. Also
duke@435 2126 // check that the type is an array type.
roland@2728 2127 if (op->expected_type() == NULL) {
duke@435 2128 __ mov(src, O0);
duke@435 2129 __ mov(src_pos, O1);
duke@435 2130 __ mov(dst, O2);
duke@435 2131 __ mov(dst_pos, O3);
duke@435 2132 __ mov(length, O4);
roland@2728 2133 address copyfunc_addr = StubRoutines::generic_arraycopy();
roland@2728 2134
roland@2728 2135 if (copyfunc_addr == NULL) { // Use C version if stub was not generated
roland@2728 2136 __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy));
roland@2728 2137 } else {
roland@2728 2138 #ifndef PRODUCT
roland@2728 2139 if (PrintC1Statistics) {
roland@2728 2140 address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
roland@2728 2141 __ inc_counter(counter, G1, G3);
roland@2728 2142 }
roland@2728 2143 #endif
roland@2728 2144 __ call_VM_leaf(tmp, copyfunc_addr);
roland@2728 2145 }
roland@2728 2146
roland@2728 2147 if (copyfunc_addr != NULL) {
roland@2728 2148 __ xor3(O0, -1, tmp);
roland@2728 2149 __ sub(length, tmp, length);
roland@2728 2150 __ add(src_pos, tmp, src_pos);
kvn@3037 2151 __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
roland@2728 2152 __ delayed()->add(dst_pos, tmp, dst_pos);
roland@2728 2153 } else {
kvn@3037 2154 __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
roland@2728 2155 __ delayed()->nop();
roland@2728 2156 }
duke@435 2157 __ bind(*stub->continuation());
duke@435 2158 return;
duke@435 2159 }
duke@435 2160
duke@435 2161 assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
duke@435 2162
duke@435 2163 // make sure src and dst are non-null and load array length
duke@435 2164 if (flags & LIR_OpArrayCopy::src_null_check) {
duke@435 2165 __ tst(src);
iveresov@2344 2166 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
duke@435 2167 __ delayed()->nop();
duke@435 2168 }
duke@435 2169
duke@435 2170 if (flags & LIR_OpArrayCopy::dst_null_check) {
duke@435 2171 __ tst(dst);
iveresov@2344 2172 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
duke@435 2173 __ delayed()->nop();
duke@435 2174 }
duke@435 2175
zmajo@8563 2176 // If the compiler was not able to prove that exact type of the source or the destination
zmajo@8563 2177 // of the arraycopy is an array type, check at runtime if the source or the destination is
zmajo@8563 2178 // an instance type.
zmajo@8563 2179 if (flags & LIR_OpArrayCopy::type_check) {
zmajo@8563 2180 if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
zmajo@8563 2181 __ load_klass(dst, tmp);
zmajo@8563 2182 __ lduw(tmp, in_bytes(Klass::layout_helper_offset()), tmp2);
zmajo@8563 2183 __ cmp(tmp2, Klass::_lh_neutral_value);
zmajo@8563 2184 __ br(Assembler::greaterEqual, false, Assembler::pn, *stub->entry());
zmajo@8563 2185 __ delayed()->nop();
zmajo@8563 2186 }
zmajo@8563 2187
zmajo@8563 2188 if (!(flags & LIR_OpArrayCopy::src_objarray)) {
zmajo@8563 2189 __ load_klass(src, tmp);
zmajo@8563 2190 __ lduw(tmp, in_bytes(Klass::layout_helper_offset()), tmp2);
zmajo@8563 2191 __ cmp(tmp2, Klass::_lh_neutral_value);
zmajo@8563 2192 __ br(Assembler::greaterEqual, false, Assembler::pn, *stub->entry());
zmajo@8563 2193 __ delayed()->nop();
zmajo@8563 2194 }
zmajo@8563 2195 }
zmajo@8563 2196
duke@435 2197 if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
duke@435 2198 // test src_pos register
kvn@3037 2199 __ cmp_zero_and_br(Assembler::less, src_pos, *stub->entry());
duke@435 2200 __ delayed()->nop();
duke@435 2201 }
duke@435 2202
duke@435 2203 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
duke@435 2204 // test dst_pos register
kvn@3037 2205 __ cmp_zero_and_br(Assembler::less, dst_pos, *stub->entry());
duke@435 2206 __ delayed()->nop();
duke@435 2207 }
duke@435 2208
duke@435 2209 if (flags & LIR_OpArrayCopy::length_positive_check) {
duke@435 2210 // make sure length isn't negative
kvn@3037 2211 __ cmp_zero_and_br(Assembler::less, length, *stub->entry());
duke@435 2212 __ delayed()->nop();
duke@435 2213 }
duke@435 2214
duke@435 2215 if (flags & LIR_OpArrayCopy::src_range_check) {
duke@435 2216 __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2);
duke@435 2217 __ add(length, src_pos, tmp);
duke@435 2218 __ cmp(tmp2, tmp);
duke@435 2219 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
duke@435 2220 __ delayed()->nop();
duke@435 2221 }
duke@435 2222
duke@435 2223 if (flags & LIR_OpArrayCopy::dst_range_check) {
duke@435 2224 __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2);
duke@435 2225 __ add(length, dst_pos, tmp);
duke@435 2226 __ cmp(tmp2, tmp);
duke@435 2227 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
duke@435 2228 __ delayed()->nop();
duke@435 2229 }
duke@435 2230
roland@2728 2231 int shift = shift_amount(basic_type);
roland@2728 2232
duke@435 2233 if (flags & LIR_OpArrayCopy::type_check) {
roland@2728 2234 // We don't know whether the array types are compatible
roland@2728 2235 if (basic_type != T_OBJECT) {
roland@2728 2236 // Simple test for basic type arrays
ehelin@5694 2237 if (UseCompressedClassPointers) {
roland@2728 2238 // We don't need to decode because we just need to compare
roland@2728 2239 __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
roland@2728 2240 __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
roland@2728 2241 __ cmp(tmp, tmp2);
roland@2728 2242 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
roland@2728 2243 } else {
roland@2728 2244 __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
roland@2728 2245 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
roland@2728 2246 __ cmp(tmp, tmp2);
roland@2728 2247 __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
roland@2728 2248 }
roland@2728 2249 __ delayed()->nop();
iveresov@2344 2250 } else {
roland@2728 2251 // For object arrays, if src is a subclass of dst then we can
roland@2728 2252 // safely do the copy.
roland@2728 2253 address copyfunc_addr = StubRoutines::checkcast_arraycopy();
roland@2728 2254
roland@2728 2255 Label cont, slow;
roland@2728 2256 assert_different_registers(tmp, tmp2, G3, G1);
roland@2728 2257
roland@2728 2258 __ load_klass(src, G3);
roland@2728 2259 __ load_klass(dst, G1);
roland@2728 2260
roland@2728 2261 __ check_klass_subtype_fast_path(G3, G1, tmp, tmp2, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL);
roland@2728 2262
roland@2728 2263 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
roland@2728 2264 __ delayed()->nop();
roland@2728 2265
roland@2728 2266 __ cmp(G3, 0);
roland@2728 2267 if (copyfunc_addr != NULL) { // use stub if available
roland@2728 2268 // src is not a subclass of dst so we have to do a
roland@2728 2269 // per-element check.
roland@2728 2270 __ br(Assembler::notEqual, false, Assembler::pt, cont);
roland@2728 2271 __ delayed()->nop();
roland@2728 2272
roland@2728 2273 __ bind(slow);
roland@2728 2274
roland@2728 2275 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
roland@2728 2276 if ((flags & mask) != mask) {
roland@2728 2277 // Check that both of them are object arrays.
roland@2728 2278 assert(flags & mask, "one of the two should be known to be an object array");
roland@2728 2279
roland@2728 2280 if (!(flags & LIR_OpArrayCopy::src_objarray)) {
roland@2728 2281 __ load_klass(src, tmp);
roland@2728 2282 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
roland@2728 2283 __ load_klass(dst, tmp);
roland@2728 2284 }
stefank@3391 2285 int lh_offset = in_bytes(Klass::layout_helper_offset());
roland@2728 2286
roland@2728 2287 __ lduw(tmp, lh_offset, tmp2);
roland@2728 2288
roland@2728 2289 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
roland@2728 2290 __ set(objArray_lh, tmp);
roland@2728 2291 __ cmp(tmp, tmp2);
roland@2728 2292 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
roland@2728 2293 __ delayed()->nop();
roland@2728 2294 }
roland@2728 2295
roland@2728 2296 Register src_ptr = O0;
roland@2728 2297 Register dst_ptr = O1;
roland@2728 2298 Register len = O2;
roland@2728 2299 Register chk_off = O3;
roland@2728 2300 Register super_k = O4;
roland@2728 2301
roland@2728 2302 __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
roland@2728 2303 if (shift == 0) {
roland@2728 2304 __ add(src_ptr, src_pos, src_ptr);
roland@2728 2305 } else {
roland@2728 2306 __ sll(src_pos, shift, tmp);
roland@2728 2307 __ add(src_ptr, tmp, src_ptr);
roland@2728 2308 }
roland@2728 2309
roland@2728 2310 __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
roland@2728 2311 if (shift == 0) {
roland@2728 2312 __ add(dst_ptr, dst_pos, dst_ptr);
roland@2728 2313 } else {
roland@2728 2314 __ sll(dst_pos, shift, tmp);
roland@2728 2315 __ add(dst_ptr, tmp, dst_ptr);
roland@2728 2316 }
roland@2728 2317 __ mov(length, len);
roland@2728 2318 __ load_klass(dst, tmp);
roland@2728 2319
coleenp@4142 2320 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
roland@2728 2321 __ ld_ptr(tmp, ek_offset, super_k);
roland@2728 2322
stefank@3391 2323 int sco_offset = in_bytes(Klass::super_check_offset_offset());
roland@2728 2324 __ lduw(super_k, sco_offset, chk_off);
roland@2728 2325
roland@2728 2326 __ call_VM_leaf(tmp, copyfunc_addr);
roland@2728 2327
roland@2728 2328 #ifndef PRODUCT
roland@2728 2329 if (PrintC1Statistics) {
roland@2728 2330 Label failed;
kvn@3037 2331 __ br_notnull_short(O0, Assembler::pn, failed);
roland@2728 2332 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, G1, G3);
roland@2728 2333 __ bind(failed);
roland@2728 2334 }
roland@2728 2335 #endif
roland@2728 2336
roland@2728 2337 __ br_null(O0, false, Assembler::pt, *stub->continuation());
roland@2728 2338 __ delayed()->xor3(O0, -1, tmp);
roland@2728 2339
roland@2728 2340 #ifndef PRODUCT
roland@2728 2341 if (PrintC1Statistics) {
roland@2728 2342 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, G1, G3);
roland@2728 2343 }
roland@2728 2344 #endif
roland@2728 2345
roland@2728 2346 __ sub(length, tmp, length);
roland@2728 2347 __ add(src_pos, tmp, src_pos);
roland@2728 2348 __ br(Assembler::always, false, Assembler::pt, *stub->entry());
roland@2728 2349 __ delayed()->add(dst_pos, tmp, dst_pos);
roland@2728 2350
roland@2728 2351 __ bind(cont);
roland@2728 2352 } else {
roland@2728 2353 __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
roland@2728 2354 __ delayed()->nop();
roland@2728 2355 __ bind(cont);
roland@2728 2356 }
iveresov@2344 2357 }
duke@435 2358 }
duke@435 2359
duke@435 2360 #ifdef ASSERT
duke@435 2361 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
duke@435 2362 // Sanity check the known type with the incoming class. For the
duke@435 2363 // primitive case the types must match exactly with src.klass and
duke@435 2364 // dst.klass each exactly matching the default type. For the
duke@435 2365 // object array case, if no type check is needed then either the
duke@435 2366 // dst type is exactly the expected type and the src type is a
duke@435 2367 // subtype which we can't check or src is the same array as dst
duke@435 2368 // but not necessarily exactly of type default_type.
duke@435 2369 Label known_ok, halt;
coleenp@4037 2370 metadata2reg(op->expected_type()->constant_encoding(), tmp);
ehelin@5694 2371 if (UseCompressedClassPointers) {
iveresov@2344 2372 // tmp holds the default type. It currently comes uncompressed after the
iveresov@2344 2373 // load of a constant, so encode it.
roland@4159 2374 __ encode_klass_not_null(tmp);
iveresov@2344 2375 // load the raw value of the dst klass, since we will be comparing
iveresov@2344 2376 // uncompressed values directly.
iveresov@2344 2377 __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
iveresov@2344 2378 if (basic_type != T_OBJECT) {
iveresov@2344 2379 __ cmp(tmp, tmp2);
iveresov@2344 2380 __ br(Assembler::notEqual, false, Assembler::pn, halt);
iveresov@2344 2381 // load the raw value of the src klass.
iveresov@2344 2382 __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
kvn@3037 2383 __ cmp_and_br_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
iveresov@2344 2384 } else {
iveresov@2344 2385 __ cmp(tmp, tmp2);
iveresov@2344 2386 __ br(Assembler::equal, false, Assembler::pn, known_ok);
iveresov@2344 2387 __ delayed()->cmp(src, dst);
iveresov@2344 2388 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
iveresov@2344 2389 __ delayed()->nop();
iveresov@2344 2390 }
duke@435 2391 } else {
iveresov@2344 2392 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
iveresov@2344 2393 if (basic_type != T_OBJECT) {
iveresov@2344 2394 __ cmp(tmp, tmp2);
iveresov@2344 2395 __ brx(Assembler::notEqual, false, Assembler::pn, halt);
iveresov@2344 2396 __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
kvn@3037 2397 __ cmp_and_brx_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
iveresov@2344 2398 } else {
iveresov@2344 2399 __ cmp(tmp, tmp2);
iveresov@2344 2400 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
iveresov@2344 2401 __ delayed()->cmp(src, dst);
iveresov@2344 2402 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
iveresov@2344 2403 __ delayed()->nop();
iveresov@2344 2404 }
duke@435 2405 }
duke@435 2406 __ bind(halt);
duke@435 2407 __ stop("incorrect type information in arraycopy");
duke@435 2408 __ bind(known_ok);
duke@435 2409 }
duke@435 2410 #endif
duke@435 2411
roland@2728 2412 #ifndef PRODUCT
roland@2728 2413 if (PrintC1Statistics) {
roland@2728 2414 address counter = Runtime1::arraycopy_count_address(basic_type);
roland@2728 2415 __ inc_counter(counter, G1, G3);
roland@2728 2416 }
roland@2728 2417 #endif
duke@435 2418
duke@435 2419 Register src_ptr = O0;
duke@435 2420 Register dst_ptr = O1;
duke@435 2421 Register len = O2;
duke@435 2422
duke@435 2423 __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
duke@435 2424 if (shift == 0) {
duke@435 2425 __ add(src_ptr, src_pos, src_ptr);
duke@435 2426 } else {
duke@435 2427 __ sll(src_pos, shift, tmp);
duke@435 2428 __ add(src_ptr, tmp, src_ptr);
duke@435 2429 }
duke@435 2430
duke@435 2431 __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
duke@435 2432 if (shift == 0) {
duke@435 2433 __ add(dst_ptr, dst_pos, dst_ptr);
duke@435 2434 } else {
duke@435 2435 __ sll(dst_pos, shift, tmp);
duke@435 2436 __ add(dst_ptr, tmp, dst_ptr);
duke@435 2437 }
duke@435 2438
roland@2728 2439 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
roland@2728 2440 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
roland@2728 2441 const char *name;
roland@2728 2442 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
roland@2728 2443
roland@2728 2444 // arraycopy stubs take a length in number of elements, so don't scale it.
roland@2728 2445 __ mov(length, len);
roland@2728 2446 __ call_VM_leaf(tmp, entry);
duke@435 2447
duke@435 2448 __ bind(*stub->continuation());
duke@435 2449 }
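// The address computation repeated throughout emit_arraycopy above, written
// out as a formula:
//   src_ptr = src + arrayOopDesc::base_offset_in_bytes(basic_type)
//                 + (src_pos << shift_amount(basic_type))
// and analogously for dst_ptr; the selected stub then receives the raw
// element count in O2 and does any scaling itself.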
duke@435 2450
duke@435 2451
duke@435 2452 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
duke@435 2453 if (dest->is_single_cpu()) {
duke@435 2454 #ifdef _LP64
duke@435 2455 if (left->type() == T_OBJECT) {
duke@435 2456 switch (code) {
duke@435 2457 case lir_shl: __ sllx (left->as_register(), count->as_register(), dest->as_register()); break;
duke@435 2458 case lir_shr: __ srax (left->as_register(), count->as_register(), dest->as_register()); break;
duke@435 2459 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
duke@435 2460 default: ShouldNotReachHere();
duke@435 2461 }
duke@435 2462 } else
duke@435 2463 #endif
duke@435 2464 switch (code) {
duke@435 2465 case lir_shl: __ sll (left->as_register(), count->as_register(), dest->as_register()); break;
duke@435 2466 case lir_shr: __ sra (left->as_register(), count->as_register(), dest->as_register()); break;
duke@435 2467 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
duke@435 2468 default: ShouldNotReachHere();
duke@435 2469 }
duke@435 2470 } else {
duke@435 2471 #ifdef _LP64
duke@435 2472 switch (code) {
duke@435 2473 case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
duke@435 2474 case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
duke@435 2475 case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
duke@435 2476 default: ShouldNotReachHere();
duke@435 2477 }
duke@435 2478 #else
duke@435 2479 switch (code) {
duke@435 2480 case lir_shl: __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
duke@435 2481 case lir_shr: __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
duke@435 2482 case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
duke@435 2483 default: ShouldNotReachHere();
duke@435 2484 }
duke@435 2485 #endif
duke@435 2486 }
duke@435 2487 }
duke@435 2488
duke@435 2489
duke@435 2490 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
duke@435 2491 #ifdef _LP64
duke@435 2492 if (left->type() == T_OBJECT) {
duke@435 2493 count = count & 63; // shouldn't shift by more than the width of intptr_t in bits
duke@435 2494 Register l = left->as_register();
duke@435 2495 Register d = dest->as_register_lo();
duke@435 2496 switch (code) {
duke@435 2497 case lir_shl: __ sllx (l, count, d); break;
duke@435 2498 case lir_shr: __ srax (l, count, d); break;
duke@435 2499 case lir_ushr: __ srlx (l, count, d); break;
duke@435 2500 default: ShouldNotReachHere();
duke@435 2501 }
duke@435 2502 return;
duke@435 2503 }
duke@435 2504 #endif
duke@435 2505
duke@435 2506 if (dest->is_single_cpu()) {
duke@435 2507 count = count & 0x1F; // Java spec
duke@435 2508 switch (code) {
duke@435 2509 case lir_shl: __ sll (left->as_register(), count, dest->as_register()); break;
duke@435 2510 case lir_shr: __ sra (left->as_register(), count, dest->as_register()); break;
duke@435 2511 case lir_ushr: __ srl (left->as_register(), count, dest->as_register()); break;
duke@435 2512 default: ShouldNotReachHere();
duke@435 2513 }
duke@435 2514 } else if (dest->is_double_cpu()) {
duke@435 2515 count = count & 63; // Java spec
duke@435 2516 switch (code) {
duke@435 2517 case lir_shl: __ sllx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
duke@435 2518 case lir_shr: __ srax (left->as_pointer_register(), count, dest->as_pointer_register()); break;
duke@435 2519 case lir_ushr: __ srlx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
duke@435 2520 default: ShouldNotReachHere();
duke@435 2521 }
duke@435 2522 } else {
duke@435 2523 ShouldNotReachHere();
duke@435 2524 }
duke@435 2525 }
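// The masks in the constant shift_op above follow the Java language rules
// for shift distances: int shifts use only the low 5 bits of the count and
// long shifts the low 6. For example, a shift by 33 is emitted as a shift
// by 1 for T_INT but as a genuine shift by 33 for T_LONG.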
duke@435 2526
duke@435 2527
duke@435 2528 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
duke@435 2529 assert(op->tmp1()->as_register() == G1 &&
duke@435 2530 op->tmp2()->as_register() == G3 &&
duke@435 2531 op->tmp3()->as_register() == G4 &&
duke@435 2532 op->obj()->as_register() == O0 &&
duke@435 2533 op->klass()->as_register() == G5, "must be");
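  // If an initialization check is required, load the klass's _init_state
  // and branch to the slow-path stub unless the class is already fully
  // initialized; the stub calls into the runtime, which performs the
  // initialization if needed.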
duke@435 2534 if (op->init_check()) {
coleenp@3368 2535 __ ldub(op->klass()->as_register(),
coleenp@4037 2536 in_bytes(InstanceKlass::init_state_offset()),
duke@435 2537 op->tmp1()->as_register());
duke@435 2538 add_debug_info_for_null_check_here(op->stub()->info());
coleenp@4037 2539 __ cmp(op->tmp1()->as_register(), InstanceKlass::fully_initialized);
duke@435 2540 __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
duke@435 2541 __ delayed()->nop();
duke@435 2542 }
duke@435 2543 __ allocate_object(op->obj()->as_register(),
duke@435 2544 op->tmp1()->as_register(),
duke@435 2545 op->tmp2()->as_register(),
duke@435 2546 op->tmp3()->as_register(),
duke@435 2547 op->header_size(),
duke@435 2548 op->object_size(),
duke@435 2549 op->klass()->as_register(),
duke@435 2550 *op->stub()->entry());
duke@435 2551 __ bind(*op->stub()->continuation());
duke@435 2552 __ verify_oop(op->obj()->as_register());
duke@435 2553 }
duke@435 2554
duke@435 2555
duke@435 2556 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
duke@435 2557 assert(op->tmp1()->as_register() == G1 &&
duke@435 2558 op->tmp2()->as_register() == G3 &&
duke@435 2559 op->tmp3()->as_register() == G4 &&
duke@435 2560 op->tmp4()->as_register() == O1 &&
duke@435 2561 op->klass()->as_register() == G5, "must be");
iveresov@2432 2562
iveresov@2432 2563 LP64_ONLY( __ signx(op->len()->as_register()); )
duke@435 2564 if (UseSlowPath ||
duke@435 2565 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
duke@435 2566 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
never@1813 2567 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
duke@435 2568 __ delayed()->nop();
duke@435 2569 } else {
duke@435 2570 __ allocate_array(op->obj()->as_register(),
duke@435 2571 op->len()->as_register(),
duke@435 2572 op->tmp1()->as_register(),
duke@435 2573 op->tmp2()->as_register(),
duke@435 2574 op->tmp3()->as_register(),
duke@435 2575 arrayOopDesc::header_size(op->type()),
kvn@464 2576 type2aelembytes(op->type()),
duke@435 2577 op->klass()->as_register(),
duke@435 2578 *op->stub()->entry());
duke@435 2579 }
duke@435 2580 __ bind(*op->stub()->continuation());
duke@435 2581 }
duke@435 2582
duke@435 2583
iveresov@2138 2584 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
iveresov@2138 2585 ciMethodData *md, ciProfileData *data,
iveresov@2138 2586 Register recv, Register tmp1, Label* update_done) {
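  // Receiver-type profiling: the ReceiverTypeData record holds a small,
  // fixed number of (receiver klass, count) rows. First try to match an
  // existing row and bump its counter; failing that, claim the first
  // empty row for this receiver.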
iveresov@2138 2587 uint i;
iveresov@2138 2588 for (i = 0; i < VirtualCallData::row_limit(); i++) {
iveresov@2138 2589 Label next_test;
iveresov@2138 2590 // See if the receiver is receiver[n].
iveresov@2138 2591 Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
iveresov@2138 2592 mdo_offset_bias);
iveresov@2138 2593 __ ld_ptr(receiver_addr, tmp1);
morris@5980 2594 __ verify_klass_ptr(tmp1);
kvn@3037 2595 __ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test);
iveresov@2138 2596 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
iveresov@2138 2597 mdo_offset_bias);
iveresov@2138 2598 __ ld_ptr(data_addr, tmp1);
iveresov@2138 2599 __ add(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2138 2600 __ st_ptr(tmp1, data_addr);
kvn@3037 2601 __ ba(*update_done);
iveresov@2138 2602 __ delayed()->nop();
iveresov@2138 2603 __ bind(next_test);
iveresov@2138 2604 }
iveresov@2138 2605
iveresov@2138 2606 // Didn't find receiver; find next empty slot and fill it in
iveresov@2138 2607 for (i = 0; i < VirtualCallData::row_limit(); i++) {
iveresov@2138 2608 Label next_test;
iveresov@2138 2609 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
iveresov@2138 2610 mdo_offset_bias);
iveresov@2344 2611 __ ld_ptr(recv_addr, tmp1);
kvn@3037 2612 __ br_notnull_short(tmp1, Assembler::pt, next_test);
iveresov@2138 2613 __ st_ptr(recv, recv_addr);
iveresov@2138 2614 __ set(DataLayout::counter_increment, tmp1);
iveresov@2138 2615 __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
iveresov@2138 2616 mdo_offset_bias);
kvn@3037 2617 __ ba(*update_done);
iveresov@2138 2618 __ delayed()->nop();
iveresov@2138 2619 __ bind(next_test);
iveresov@2138 2620 }
iveresov@2138 2621 }
iveresov@2138 2622
iveresov@2146 2623
iveresov@2146 2624 void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
iveresov@2146 2625 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
iveresov@2349 2626 md = method->method_data_or_null();
iveresov@2349 2627 assert(md != NULL, "Sanity");
iveresov@2146 2628 data = md->bci_to_data(bci);
iveresov@2146 2629 assert(data != NULL, "need data for checkcast");
iveresov@2146 2630 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
iveresov@2146 2631 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
iveresov@2146 2632     // The offset is large, so bias the mdo by the base of the slot so
iveresov@2146 2633     // that the ld can use a simm13 displacement to reference the slots of the data.
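    // (A simm13 immediate covers displacements in [-4096, 4095].)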
iveresov@2146 2634 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
iveresov@2146 2635 }
iveresov@2146 2636 }
iveresov@2146 2637
iveresov@2146 2638 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
iveresov@2138 2639 // we always need a stub for the failure case.
iveresov@2138 2640 CodeStub* stub = op->stub();
iveresov@2138 2641 Register obj = op->object()->as_register();
iveresov@2138 2642 Register k_RInfo = op->tmp1()->as_register();
iveresov@2138 2643 Register klass_RInfo = op->tmp2()->as_register();
iveresov@2138 2644 Register dst = op->result_opr()->as_register();
iveresov@2138 2645 Register Rtmp1 = op->tmp3()->as_register();
iveresov@2138 2646 ciKlass* k = op->klass();
iveresov@2138 2647
iveresov@2138 2648
iveresov@2138 2649 if (obj == k_RInfo) {
iveresov@2138 2650 k_RInfo = klass_RInfo;
iveresov@2138 2651 klass_RInfo = obj;
iveresov@2138 2652 }
iveresov@2138 2653
iveresov@2138 2654 ciMethodData* md;
iveresov@2138 2655 ciProfileData* data;
iveresov@2138 2656 int mdo_offset_bias = 0;
iveresov@2138 2657 if (op->should_profile()) {
iveresov@2138 2658 ciMethod* method = op->profiled_method();
iveresov@2138 2659 assert(method != NULL, "Should have method");
iveresov@2146 2660 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
iveresov@2146 2661
iveresov@2146 2662 Label not_null;
kvn@3037 2663 __ br_notnull_short(obj, Assembler::pn, not_null);
iveresov@2138 2664 Register mdo = k_RInfo;
iveresov@2138 2665 Register data_val = Rtmp1;
coleenp@4037 2666 metadata2reg(md->constant_encoding(), mdo);
iveresov@2138 2667 if (mdo_offset_bias > 0) {
iveresov@2138 2668 __ set(mdo_offset_bias, data_val);
iveresov@2138 2669 __ add(mdo, data_val, mdo);
iveresov@2138 2670 }
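    // obj is null here: set the null_seen flag in the MDO's DataLayout
    // flags so the profile records that this bci has seen a null, then
    // take the obj_is_null continuation without any type check.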
iveresov@2138 2671 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
iveresov@2138 2672 __ ldub(flags_addr, data_val);
iveresov@2138 2673 __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
iveresov@2138 2674 __ stb(data_val, flags_addr);
kvn@3037 2675 __ ba(*obj_is_null);
iveresov@2146 2676 __ delayed()->nop();
iveresov@2146 2677 __ bind(not_null);
iveresov@2146 2678 } else {
iveresov@2146 2679 __ br_null(obj, false, Assembler::pn, *obj_is_null);
iveresov@2146 2680 __ delayed()->nop();
iveresov@2138 2681 }
iveresov@2146 2682
iveresov@2146 2683 Label profile_cast_failure, profile_cast_success;
iveresov@2146 2684 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
iveresov@2146 2685 Label *success_target = op->should_profile() ? &profile_cast_success : success;
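  // When profiling, successes and failures are first routed through local
  // labels so the cast profile counters can be updated before control
  // continues to the caller-supplied success/failure targets.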
iveresov@2138 2686
iveresov@2138 2687 // patching may screw with our temporaries on sparc,
iveresov@2138 2688 // so let's do it before loading the class
iveresov@2138 2689 if (k->is_loaded()) {
coleenp@4037 2690 metadata2reg(k->constant_encoding(), k_RInfo);
iveresov@2138 2691 } else {
coleenp@4037 2692 klass2reg_with_patching(k_RInfo, op->info_for_patch());
iveresov@2138 2693 }
iveresov@2138 2694 assert(obj != k_RInfo, "must be different");
iveresov@2138 2695
iveresov@2138 2696 // get object class
iveresov@2138 2697 // not a safepoint as obj null check happens earlier
iveresov@2344 2698 __ load_klass(obj, klass_RInfo);
iveresov@2138 2699 if (op->fast_check()) {
iveresov@2138 2700 assert_different_registers(klass_RInfo, k_RInfo);
iveresov@2138 2701 __ cmp(k_RInfo, klass_RInfo);
iveresov@2138 2702 __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target);
iveresov@2138 2703 __ delayed()->nop();
iveresov@2138 2704 } else {
iveresov@2138 2705 bool need_slow_path = true;
iveresov@2138 2706 if (k->is_loaded()) {
stefank@3391 2707       if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) {
iveresov@2138 2708         need_slow_path = false;
      }
iveresov@2138 2709 // perform the fast part of the checking logic
iveresov@2138 2710 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
iveresov@2146 2711 (need_slow_path ? success_target : NULL),
iveresov@2138 2712 failure_target, NULL,
iveresov@2138 2713 RegisterOrConstant(k->super_check_offset()));
iveresov@2138 2714 } else {
iveresov@2138 2715 // perform the fast part of the checking logic
iveresov@2146 2716 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target,
iveresov@2138 2717 failure_target, NULL);
iveresov@2138 2718 }
iveresov@2138 2719 if (need_slow_path) {
iveresov@2138 2720 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
iveresov@2138 2721 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
iveresov@2138 2722 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
iveresov@2138 2723 __ delayed()->nop();
iveresov@2138 2724 __ cmp(G3, 0);
iveresov@2138 2725 __ br(Assembler::equal, false, Assembler::pn, *failure_target);
iveresov@2138 2726 __ delayed()->nop();
iveresov@2146 2727 // Fall through to success case
iveresov@2138 2728 }
iveresov@2138 2729 }
iveresov@2138 2730
iveresov@2138 2731 if (op->should_profile()) {
iveresov@2138 2732 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
iveresov@2138 2733 assert_different_registers(obj, mdo, recv, tmp1);
iveresov@2146 2734 __ bind(profile_cast_success);
coleenp@4037 2735 metadata2reg(md->constant_encoding(), mdo);
iveresov@2138 2736 if (mdo_offset_bias > 0) {
iveresov@2138 2737 __ set(mdo_offset_bias, tmp1);
iveresov@2138 2738 __ add(mdo, tmp1, mdo);
iveresov@2138 2739 }
iveresov@2344 2740 __ load_klass(obj, recv);
iveresov@2146 2741 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
iveresov@2138 2742 // Jump over the failure case
kvn@3037 2743 __ ba(*success);
iveresov@2138 2744 __ delayed()->nop();
iveresov@2138 2745 // Cast failure case
iveresov@2138 2746 __ bind(profile_cast_failure);
coleenp@4037 2747 metadata2reg(md->constant_encoding(), mdo);
iveresov@2138 2748 if (mdo_offset_bias > 0) {
iveresov@2138 2749 __ set(mdo_offset_bias, tmp1);
iveresov@2138 2750 __ add(mdo, tmp1, mdo);
iveresov@2138 2751 }
iveresov@2138 2752 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
iveresov@2138 2753 __ ld_ptr(data_addr, tmp1);
iveresov@2138 2754 __ sub(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2138 2755 __ st_ptr(tmp1, data_addr);
kvn@3037 2756 __ ba(*failure);
iveresov@2138 2757 __ delayed()->nop();
iveresov@2138 2758 }
kvn@3037 2759 __ ba(*success);
iveresov@2146 2760 __ delayed()->nop();
iveresov@2138 2761 }
iveresov@2138 2762
duke@435 2763 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
duke@435 2764 LIR_Code code = op->code();
duke@435 2765 if (code == lir_store_check) {
duke@435 2766 Register value = op->object()->as_register();
duke@435 2767 Register array = op->array()->as_register();
duke@435 2768 Register k_RInfo = op->tmp1()->as_register();
duke@435 2769 Register klass_RInfo = op->tmp2()->as_register();
duke@435 2770 Register Rtmp1 = op->tmp3()->as_register();
duke@435 2771
duke@435 2772 __ verify_oop(value);
duke@435 2773 CodeStub* stub = op->stub();
iveresov@2146 2774 // check if it needs to be profiled
iveresov@2146 2775 ciMethodData* md;
iveresov@2146 2776 ciProfileData* data;
iveresov@2146 2777 int mdo_offset_bias = 0;
iveresov@2146 2778 if (op->should_profile()) {
iveresov@2146 2779 ciMethod* method = op->profiled_method();
iveresov@2146 2780 assert(method != NULL, "Should have method");
iveresov@2146 2781 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
iveresov@2146 2782 }
iveresov@2146 2783 Label profile_cast_success, profile_cast_failure, done;
iveresov@2146 2784 Label *success_target = op->should_profile() ? &profile_cast_success : &done;
iveresov@2146 2785 Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
iveresov@2146 2786
iveresov@2146 2787 if (op->should_profile()) {
iveresov@2146 2788 Label not_null;
kvn@3037 2789 __ br_notnull_short(value, Assembler::pn, not_null);
iveresov@2146 2790 Register mdo = k_RInfo;
iveresov@2146 2791 Register data_val = Rtmp1;
coleenp@4037 2792 metadata2reg(md->constant_encoding(), mdo);
iveresov@2146 2793 if (mdo_offset_bias > 0) {
iveresov@2146 2794 __ set(mdo_offset_bias, data_val);
iveresov@2146 2795 __ add(mdo, data_val, mdo);
iveresov@2146 2796 }
iveresov@2146 2797 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
iveresov@2146 2798 __ ldub(flags_addr, data_val);
iveresov@2146 2799 __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
iveresov@2146 2800 __ stb(data_val, flags_addr);
kvn@3037 2801 __ ba_short(done);
iveresov@2146 2802 __ bind(not_null);
iveresov@2146 2803 } else {
kvn@3037 2804 __ br_null_short(value, Assembler::pn, done);
iveresov@2146 2805 }
iveresov@2344 2806 add_debug_info_for_null_check_here(op->info_for_exception());
iveresov@2344 2807 __ load_klass(array, k_RInfo);
iveresov@2344 2808 __ load_klass(value, klass_RInfo);
duke@435 2809
duke@435 2810 // get instance klass
coleenp@4142 2811 __ ld_ptr(Address(k_RInfo, ObjArrayKlass::element_klass_offset()), k_RInfo);
jrose@1079 2812 // perform the fast part of the checking logic
iveresov@2146 2813 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);
jrose@1079 2814
jrose@1079 2815 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
jrose@1079 2816 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
duke@435 2817 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
duke@435 2818 __ delayed()->nop();
duke@435 2819 __ cmp(G3, 0);
iveresov@2146 2820 __ br(Assembler::equal, false, Assembler::pn, *failure_target);
duke@435 2821 __ delayed()->nop();
iveresov@2146 2822 // fall through to the success case
iveresov@2146 2823
iveresov@2146 2824 if (op->should_profile()) {
iveresov@2146 2825 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
iveresov@2146 2826 assert_different_registers(value, mdo, recv, tmp1);
iveresov@2146 2827 __ bind(profile_cast_success);
coleenp@4037 2828 metadata2reg(md->constant_encoding(), mdo);
iveresov@2146 2829 if (mdo_offset_bias > 0) {
iveresov@2146 2830 __ set(mdo_offset_bias, tmp1);
iveresov@2146 2831 __ add(mdo, tmp1, mdo);
iveresov@2146 2832 }
iveresov@2344 2833 __ load_klass(value, recv);
iveresov@2146 2834 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
kvn@3037 2835 __ ba_short(done);
iveresov@2146 2836 // Cast failure case
iveresov@2146 2837 __ bind(profile_cast_failure);
coleenp@4037 2838 metadata2reg(md->constant_encoding(), mdo);
iveresov@2146 2839 if (mdo_offset_bias > 0) {
iveresov@2146 2840 __ set(mdo_offset_bias, tmp1);
iveresov@2146 2841 __ add(mdo, tmp1, mdo);
iveresov@2146 2842 }
iveresov@2146 2843 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
iveresov@2146 2844 __ ld_ptr(data_addr, tmp1);
iveresov@2146 2845 __ sub(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2146 2846 __ st_ptr(tmp1, data_addr);
kvn@3037 2847 __ ba(*stub->entry());
iveresov@2146 2848 __ delayed()->nop();
iveresov@2146 2849 }
duke@435 2850 __ bind(done);
iveresov@2146 2851 } else if (code == lir_checkcast) {
iveresov@2146 2852 Register obj = op->object()->as_register();
iveresov@2146 2853 Register dst = op->result_opr()->as_register();
iveresov@2146 2854 Label success;
iveresov@2146 2855 emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
iveresov@2146 2856 __ bind(success);
iveresov@2146 2857 __ mov(obj, dst);
duke@435 2858 } else if (code == lir_instanceof) {
duke@435 2859 Register obj = op->object()->as_register();
duke@435 2860 Register dst = op->result_opr()->as_register();
iveresov@2146 2861 Label success, failure, done;
iveresov@2146 2862 emit_typecheck_helper(op, &success, &failure, &failure);
iveresov@2146 2863 __ bind(failure);
iveresov@2146 2864 __ set(0, dst);
kvn@3037 2865 __ ba_short(done);
iveresov@2146 2866 __ bind(success);
iveresov@2146 2867 __ set(1, dst);
iveresov@2146 2868 __ bind(done);
duke@435 2869 } else {
duke@435 2870 ShouldNotReachHere();
duke@435 2871 }
duke@435 2872
duke@435 2873 }
duke@435 2874
duke@435 2875
duke@435 2876 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
duke@435 2877 if (op->code() == lir_cas_long) {
duke@435 2878 assert(VM_Version::supports_cx8(), "wrong machine");
duke@435 2879 Register addr = op->addr()->as_pointer_register();
duke@435 2880 Register cmp_value_lo = op->cmp_value()->as_register_lo();
duke@435 2881 Register cmp_value_hi = op->cmp_value()->as_register_hi();
duke@435 2882 Register new_value_lo = op->new_value()->as_register_lo();
duke@435 2883 Register new_value_hi = op->new_value()->as_register_hi();
duke@435 2884 Register t1 = op->tmp1()->as_register();
duke@435 2885 Register t2 = op->tmp2()->as_register();
duke@435 2886 #ifdef _LP64
duke@435 2887 __ mov(cmp_value_lo, t1);
duke@435 2888 __ mov(new_value_lo, t2);
iveresov@2412 2889 // perform the compare and swap operation
iveresov@2412 2890 __ casx(addr, t1, t2);
iveresov@2412 2891 // generate condition code - if the swap succeeded, t2 ("new value" reg) was
iveresov@2412 2892 // overwritten with the original value in "addr" and will be equal to t1.
iveresov@2412 2893 __ cmp(t1, t2);
duke@435 2894 #else
duke@435 2895 // move high and low halves of long values into single registers
duke@435 2896 __ sllx(cmp_value_hi, 32, t1); // shift high half into temp reg
duke@435 2897 __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half
duke@435 2898 __ or3(t1, cmp_value_lo, t1); // t1 holds 64-bit compare value
duke@435 2899 __ sllx(new_value_hi, 32, t2);
duke@435 2900 __ srl(new_value_lo, 0, new_value_lo);
duke@435 2901 __ or3(t2, new_value_lo, t2); // t2 holds 64-bit value to swap
duke@435 2902 // perform the compare and swap operation
duke@435 2903 __ casx(addr, t1, t2);
duke@435 2904 // generate condition code - if the swap succeeded, t2 ("new value" reg) was
duke@435 2905 // overwritten with the original value in "addr" and will be equal to t1.
iveresov@2412 2906 // Produce icc flag for 32bit.
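    // t2 := t1 - t2 is zero iff the swap succeeded. OR-ing the high and
    // low 32-bit halves of that 64-bit difference sets icc.Z exactly when
    // the whole difference is zero, so a subsequent 32-bit equality test
    // observes the result of the 64-bit compare.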
iveresov@2412 2907 __ sub(t1, t2, t2);
iveresov@2412 2908 __ srlx(t2, 32, t1);
iveresov@2412 2909 __ orcc(t2, t1, G0);
iveresov@2412 2910 #endif
duke@435 2911 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
duke@435 2912 Register addr = op->addr()->as_pointer_register();
duke@435 2913 Register cmp_value = op->cmp_value()->as_register();
duke@435 2914 Register new_value = op->new_value()->as_register();
duke@435 2915 Register t1 = op->tmp1()->as_register();
duke@435 2916 Register t2 = op->tmp2()->as_register();
duke@435 2917 __ mov(cmp_value, t1);
duke@435 2918 __ mov(new_value, t2);
duke@435 2919 if (op->code() == lir_cas_obj) {
iveresov@2344 2920 if (UseCompressedOops) {
iveresov@2344 2921 __ encode_heap_oop(t1);
iveresov@2344 2922 __ encode_heap_oop(t2);
duke@435 2923 __ cas(addr, t1, t2);
iveresov@2344 2924 } else {
never@2352 2925 __ cas_ptr(addr, t1, t2);
duke@435 2926 }
iveresov@2344 2927 } else {
iveresov@2344 2928 __ cas(addr, t1, t2);
iveresov@2344 2929 }
duke@435 2930 __ cmp(t1, t2);
duke@435 2931 } else {
duke@435 2932 Unimplemented();
duke@435 2933 }
duke@435 2934 }
duke@435 2935
duke@435 2936 void LIR_Assembler::set_24bit_FPU() {
duke@435 2937 Unimplemented();
duke@435 2938 }
duke@435 2939
duke@435 2940
duke@435 2941 void LIR_Assembler::reset_FPU() {
duke@435 2942 Unimplemented();
duke@435 2943 }
duke@435 2944
duke@435 2945
duke@435 2946 void LIR_Assembler::breakpoint() {
duke@435 2947 __ breakpoint_trap();
duke@435 2948 }
duke@435 2949
duke@435 2950
duke@435 2951 void LIR_Assembler::push(LIR_Opr opr) {
duke@435 2952 Unimplemented();
duke@435 2953 }
duke@435 2954
duke@435 2955
duke@435 2956 void LIR_Assembler::pop(LIR_Opr opr) {
duke@435 2957 Unimplemented();
duke@435 2958 }
duke@435 2959
duke@435 2960
duke@435 2961 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
duke@435 2962 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
duke@435 2963 Register dst = dst_opr->as_register();
duke@435 2964 Register reg = mon_addr.base();
duke@435 2965 int offset = mon_addr.disp();
duke@435 2966 // compute pointer to BasicLock
duke@435 2967 if (mon_addr.is_simm13()) {
duke@435 2968 __ add(reg, offset, dst);
duke@435 2969 } else {
duke@435 2970 __ set(offset, dst);
duke@435 2971 __ add(dst, reg, dst);
duke@435 2972 }
duke@435 2973 }
duke@435 2974
drchase@5353 2975 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
drchase@5353 2976 fatal("CRC32 intrinsic is not implemented on this platform");
drchase@5353 2977 }
duke@435 2978
duke@435 2979 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
duke@435 2980 Register obj = op->obj_opr()->as_register();
duke@435 2981 Register hdr = op->hdr_opr()->as_register();
duke@435 2982 Register lock = op->lock_opr()->as_register();
duke@435 2983
duke@435 2984 // obj may not be an oop
duke@435 2985 if (op->code() == lir_lock) {
duke@435 2986 MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
duke@435 2987 if (UseFastLocking) {
duke@435 2988 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
duke@435 2989 // add debug info for NullPointerException only if one is possible
duke@435 2990 if (op->info() != NULL) {
duke@435 2991 add_debug_info_for_null_check_here(op->info());
duke@435 2992 }
duke@435 2993 __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
duke@435 2994 } else {
duke@435 2995 // always do slow locking
duke@435 2996 // note: the slow locking code could be inlined here, however if we use
duke@435 2997 // slow locking, speed doesn't matter anyway and this solution is
duke@435 2998 // simpler and requires less duplicated code - additionally, the
duke@435 2999 // slow locking code is the same in either case which simplifies
duke@435 3000 // debugging
duke@435 3001 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
duke@435 3002 __ delayed()->nop();
duke@435 3003 }
duke@435 3004 } else {
duke@435 3005 assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
duke@435 3006 if (UseFastLocking) {
duke@435 3007 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
duke@435 3008 __ unlock_object(hdr, obj, lock, *op->stub()->entry());
duke@435 3009 } else {
duke@435 3010 // always do slow unlocking
duke@435 3011 // note: the slow unlocking code could be inlined here, however if we use
duke@435 3012 // slow unlocking, speed doesn't matter anyway and this solution is
duke@435 3013 // simpler and requires less duplicated code - additionally, the
duke@435 3014 // slow unlocking code is the same in either case which simplifies
duke@435 3015 // debugging
duke@435 3016 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
duke@435 3017 __ delayed()->nop();
duke@435 3018 }
duke@435 3019 }
duke@435 3020 __ bind(*op->stub()->continuation());
duke@435 3021 }
duke@435 3022
duke@435 3023
duke@435 3024 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
duke@435 3025 ciMethod* method = op->profiled_method();
duke@435 3026 int bci = op->profiled_bci();
twisti@3969 3027 ciMethod* callee = op->profiled_callee();
duke@435 3028
duke@435 3029 // Update counter for all call types
iveresov@2349 3030 ciMethodData* md = method->method_data_or_null();
iveresov@2349 3031 assert(md != NULL, "Sanity");
duke@435 3032 ciProfileData* data = md->bci_to_data(bci);
duke@435 3033 assert(data->is_CounterData(), "need CounterData for calls");
duke@435 3034 assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
iveresov@2138 3035 Register mdo = op->mdo()->as_register();
iveresov@2138 3036 #ifdef _LP64
iveresov@2138 3037 assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
iveresov@2138 3038 Register tmp1 = op->tmp1()->as_register_lo();
iveresov@2138 3039 #else
duke@435 3040 assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
duke@435 3041 Register tmp1 = op->tmp1()->as_register();
iveresov@2138 3042 #endif
coleenp@4037 3043 metadata2reg(md->constant_encoding(), mdo);
duke@435 3044 int mdo_offset_bias = 0;
duke@435 3045 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
duke@435 3046 data->size_in_bytes())) {
duke@435 3047     // The offset is large, so bias the mdo by the base of the slot so
duke@435 3048     // that the ld can use a simm13 displacement to reference the slots of the data.
duke@435 3049 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
duke@435 3050 __ set(mdo_offset_bias, O7);
duke@435 3051 __ add(mdo, O7, mdo);
duke@435 3052 }
duke@435 3053
twisti@1162 3054 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
duke@435 3055 Bytecodes::Code bc = method->java_code_at_bci(bci);
twisti@3969 3056 const bool callee_is_static = callee->is_loaded() && callee->is_static();
duke@435 3057 // Perform additional virtual call profiling for invokevirtual and
duke@435 3058 // invokeinterface bytecodes
duke@435 3059 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
twisti@3969 3060 !callee_is_static && // required for optimized MH invokes
iveresov@2138 3061 C1ProfileVirtualCalls) {
duke@435 3062 assert(op->recv()->is_single_cpu(), "recv must be allocated");
duke@435 3063 Register recv = op->recv()->as_register();
duke@435 3064 assert_different_registers(mdo, tmp1, recv);
duke@435 3065 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
duke@435 3066 ciKlass* known_klass = op->known_holder();
iveresov@2138 3067 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
duke@435 3068 // We know the type that will be seen at this call site; we can
coleenp@4037 3069 // statically update the MethodData* rather than needing to do
duke@435 3070 // dynamic tests on the receiver type
duke@435 3071
duke@435 3072 // NOTE: we should probably put a lock around this search to
duke@435 3073 // avoid collisions by concurrent compilations
duke@435 3074 ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
duke@435 3075 uint i;
duke@435 3076 for (i = 0; i < VirtualCallData::row_limit(); i++) {
duke@435 3077 ciKlass* receiver = vc_data->receiver(i);
duke@435 3078 if (known_klass->equals(receiver)) {
twisti@1162 3079 Address data_addr(mdo, md->byte_offset_of_slot(data,
twisti@1162 3080 VirtualCallData::receiver_count_offset(i)) -
duke@435 3081 mdo_offset_bias);
iveresov@2138 3082 __ ld_ptr(data_addr, tmp1);
duke@435 3083 __ add(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2138 3084 __ st_ptr(tmp1, data_addr);
duke@435 3085 return;
duke@435 3086 }
duke@435 3087 }
duke@435 3088
duke@435 3089 // Receiver type not found in profile data; select an empty slot
duke@435 3090
duke@435 3091 // Note that this is less efficient than it should be because it
duke@435 3092 // always does a write to the receiver part of the
duke@435 3093 // VirtualCallData rather than just the first time
duke@435 3094 for (i = 0; i < VirtualCallData::row_limit(); i++) {
duke@435 3095 ciKlass* receiver = vc_data->receiver(i);
duke@435 3096 if (receiver == NULL) {
twisti@1162 3097 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
duke@435 3098 mdo_offset_bias);
coleenp@4037 3099 metadata2reg(known_klass->constant_encoding(), tmp1);
duke@435 3100 __ st_ptr(tmp1, recv_addr);
twisti@1162 3101 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
duke@435 3102 mdo_offset_bias);
iveresov@2138 3103 __ ld_ptr(data_addr, tmp1);
duke@435 3104 __ add(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2138 3105 __ st_ptr(tmp1, data_addr);
duke@435 3106 return;
duke@435 3107 }
duke@435 3108 }
duke@435 3109 } else {
iveresov@2344 3110 __ load_klass(recv, recv);
duke@435 3111 Label update_done;
iveresov@2138 3112 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
kvn@1686 3113 // Receiver did not match any saved receiver and there is no empty row for it.
kvn@1686 3114 // Increment total counter to indicate polymorphic case.
iveresov@2138 3115 __ ld_ptr(counter_addr, tmp1);
kvn@1686 3116 __ add(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2138 3117 __ st_ptr(tmp1, counter_addr);
duke@435 3118
duke@435 3119 __ bind(update_done);
duke@435 3120 }
kvn@1686 3121 } else {
kvn@1686 3122 // Static call
iveresov@2138 3123 __ ld_ptr(counter_addr, tmp1);
kvn@1686 3124 __ add(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2138 3125 __ st_ptr(tmp1, counter_addr);
duke@435 3126 }
duke@435 3127 }
duke@435 3128
roland@5914 3129 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
roland@6223 3130 Register obj = op->obj()->as_register();
roland@6223 3131 Register tmp1 = op->tmp()->as_pointer_register();
roland@6223 3132 Register tmp2 = G1;
roland@6223 3133 Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
roland@6223 3134 ciKlass* exact_klass = op->exact_klass();
roland@6223 3135 intptr_t current_klass = op->current_klass();
roland@6223 3136 bool not_null = op->not_null();
roland@6223 3137 bool no_conflict = op->no_conflict();
roland@6223 3138
roland@6223 3139 Label update, next, none;
roland@6223 3140
roland@6223 3141 bool do_null = !not_null;
roland@6223 3142 bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
roland@6223 3143 bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
roland@6223 3144
roland@6223 3145 assert(do_null || do_update, "why are we here?");
roland@6223 3146 assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
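  // A type profile entry is a single word: a klass pointer with the
  // TypeEntries flag bits (null_seen, type_unknown) folded into its low
  // bits, which is why the code below works on the raw word with
  // xor3/btst/or3 rather than comparing klass pointers directly.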
roland@6223 3147
roland@6223 3148 __ verify_oop(obj);
roland@6223 3149
roland@6223 3150 if (tmp1 != obj) {
roland@6223 3151 __ mov(obj, tmp1);
roland@6223 3152 }
roland@6223 3153 if (do_null) {
roland@6223 3154 __ br_notnull_short(tmp1, Assembler::pt, update);
roland@6223 3155 if (!TypeEntries::was_null_seen(current_klass)) {
roland@6223 3156 __ ld_ptr(mdo_addr, tmp1);
roland@6223 3157 __ or3(tmp1, TypeEntries::null_seen, tmp1);
roland@6223 3158 __ st_ptr(tmp1, mdo_addr);
roland@6223 3159 }
roland@6223 3160 if (do_update) {
roland@6223 3161 __ ba(next);
roland@6223 3162 __ delayed()->nop();
roland@6223 3163 }
roland@6223 3164 #ifdef ASSERT
roland@6223 3165 } else {
roland@6223 3166 __ br_notnull_short(tmp1, Assembler::pt, update);
roland@6223 3167     __ stop("unexpected null obj");
roland@6223 3168 #endif
roland@6223 3169 }
roland@6223 3170
roland@6223 3171 __ bind(update);
roland@6223 3172
roland@6223 3173 if (do_update) {
roland@6223 3174 #ifdef ASSERT
roland@6223 3175 if (exact_klass != NULL) {
roland@6223 3176 Label ok;
roland@6223 3177 __ load_klass(tmp1, tmp1);
roland@6223 3178 metadata2reg(exact_klass->constant_encoding(), tmp2);
roland@6223 3179 __ cmp_and_br_short(tmp1, tmp2, Assembler::equal, Assembler::pt, ok);
roland@6223 3180 __ stop("exact klass and actual klass differ");
roland@6223 3181 __ bind(ok);
roland@6223 3182 }
roland@6223 3183 #endif
roland@6223 3184
roland@6223 3185 Label do_update;
roland@6223 3186 __ ld_ptr(mdo_addr, tmp2);
roland@6223 3187
roland@6223 3188 if (!no_conflict) {
roland@6223 3189 if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
roland@6223 3190 if (exact_klass != NULL) {
roland@6223 3191 metadata2reg(exact_klass->constant_encoding(), tmp1);
roland@6223 3192 } else {
roland@6223 3193 __ load_klass(tmp1, tmp1);
roland@6223 3194 }
roland@6223 3195
roland@6223 3196 __ xor3(tmp1, tmp2, tmp1);
roland@6223 3197 __ btst(TypeEntries::type_klass_mask, tmp1);
roland@6223 3198 // klass seen before, nothing to do. The unknown bit may have been
roland@6223 3199 // set already but no need to check.
roland@6223 3200 __ brx(Assembler::zero, false, Assembler::pt, next);
roland@6223 3201         __ delayed()->btst(TypeEntries::type_unknown, tmp1);
roland@6223 3204 // already unknown. Nothing to do anymore.
roland@6223 3205 __ brx(Assembler::notZero, false, Assembler::pt, next);
roland@6223 3206
roland@6223 3207 if (TypeEntries::is_type_none(current_klass)) {
roland@6223 3208 __ delayed()->btst(TypeEntries::type_mask, tmp2);
roland@6223 3209 __ brx(Assembler::zero, true, Assembler::pt, do_update);
roland@6223 3210 // first time here. Set profile type.
roland@6223 3211 __ delayed()->or3(tmp2, tmp1, tmp2);
roland@6223 3212 } else {
roland@6223 3213 __ delayed()->nop();
roland@6223 3214 }
roland@6223 3215 } else {
roland@6223 3216 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
roland@6223 3217 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
roland@6223 3218
roland@6223 3219 __ btst(TypeEntries::type_unknown, tmp2);
roland@6223 3220 // already unknown. Nothing to do anymore.
roland@6223 3221 __ brx(Assembler::notZero, false, Assembler::pt, next);
roland@6223 3222 __ delayed()->nop();
roland@6223 3223 }
roland@6223 3224
roland@6223 3225       // Different from before: we cannot keep an accurate profile, so mark the type as unknown.
roland@6223 3226 __ or3(tmp2, TypeEntries::type_unknown, tmp2);
roland@6223 3227 } else {
roland@6223 3228 // There's a single possible klass at this profile point
roland@6223 3229 assert(exact_klass != NULL, "should be");
roland@6223 3230 if (TypeEntries::is_type_none(current_klass)) {
roland@6223 3231 metadata2reg(exact_klass->constant_encoding(), tmp1);
roland@6223 3232 __ xor3(tmp1, tmp2, tmp1);
roland@6223 3233 __ btst(TypeEntries::type_klass_mask, tmp1);
roland@6223 3234 __ brx(Assembler::zero, false, Assembler::pt, next);
roland@6223 3235 #ifdef ASSERT
roland@6223 3237       {
roland@6223 3238 Label ok;
roland@6223 3239 __ delayed()->btst(TypeEntries::type_mask, tmp2);
roland@6223 3240 __ brx(Assembler::zero, true, Assembler::pt, ok);
roland@6223 3241 __ delayed()->nop();
roland@6223 3242
roland@6223 3243 __ stop("unexpected profiling mismatch");
roland@6223 3244 __ bind(ok);
roland@6223 3245 }
roland@6223 3246 // first time here. Set profile type.
roland@6223 3247 __ or3(tmp2, tmp1, tmp2);
roland@6223 3248 #else
roland@6223 3249 // first time here. Set profile type.
roland@6223 3250 __ delayed()->or3(tmp2, tmp1, tmp2);
roland@6223 3251 #endif
roland@6223 3252
roland@6223 3253 } else {
roland@6223 3254 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
roland@6223 3255 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
roland@6223 3256
roland@6223 3257 // already unknown. Nothing to do anymore.
roland@6223 3258 __ btst(TypeEntries::type_unknown, tmp2);
roland@6223 3259 __ brx(Assembler::notZero, false, Assembler::pt, next);
roland@6223 3260 __ delayed()->or3(tmp2, TypeEntries::type_unknown, tmp2);
roland@6223 3261 }
roland@6223 3262 }
roland@6223 3263
roland@6223 3264 __ bind(do_update);
roland@6223 3265 __ st_ptr(tmp2, mdo_addr);
roland@6223 3266
roland@6223 3267 __ bind(next);
roland@6223 3268 }
roland@5914 3269 }
roland@5914 3270
duke@435 3271 void LIR_Assembler::align_backward_branch_target() {
kvn@1800 3272 __ align(OptoLoopAlignment);
duke@435 3273 }
duke@435 3274
duke@435 3275
duke@435 3276 void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
duke@435 3277 // make sure we are expecting a delay
duke@435 3278 // this has the side effect of clearing the delay state
duke@435 3279 // so we can use _masm instead of _masm->delayed() to do the
duke@435 3280 // code generation.
duke@435 3281 __ delayed();
duke@435 3282
duke@435 3283 // make sure we only emit one instruction
duke@435 3284 int offset = code_offset();
duke@435 3285 op->delay_op()->emit_code(this);
duke@435 3286 #ifdef ASSERT
duke@435 3287 if (code_offset() - offset != NativeInstruction::nop_instruction_size) {
duke@435 3288 op->delay_op()->print();
duke@435 3289 }
duke@435 3290 assert(code_offset() - offset == NativeInstruction::nop_instruction_size,
duke@435 3291 "only one instruction can go in a delay slot");
duke@435 3292 #endif
duke@435 3293
duke@435 3294 // we may also be emitting the call info for the instruction
duke@435 3295 // which we are the delay slot of.
twisti@1919 3296 CodeEmitInfo* call_info = op->call_info();
duke@435 3297 if (call_info) {
duke@435 3298 add_call_info(code_offset(), call_info);
duke@435 3299 }
duke@435 3300
duke@435 3301 if (VerifyStackAtCalls) {
duke@435 3302 _masm->sub(FP, SP, O7);
duke@435 3303 _masm->cmp(O7, initial_frame_size_in_bytes());
duke@435 3304 _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2 );
duke@435 3305 }
duke@435 3306 }
duke@435 3307
duke@435 3308
duke@435 3309 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
duke@435 3310 assert(left->is_register(), "can only handle registers");
duke@435 3311
duke@435 3312 if (left->is_single_cpu()) {
duke@435 3313 __ neg(left->as_register(), dest->as_register());
duke@435 3314 } else if (left->is_single_fpu()) {
duke@435 3315 __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg());
duke@435 3316 } else if (left->is_double_fpu()) {
duke@435 3317 __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg());
duke@435 3318 } else {
duke@435 3319 assert (left->is_double_cpu(), "Must be a long");
duke@435 3320 Register Rlow = left->as_register_lo();
duke@435 3321 Register Rhi = left->as_register_hi();
duke@435 3322 #ifdef _LP64
duke@435 3323 __ sub(G0, Rlow, dest->as_register_lo());
duke@435 3324 #else
duke@435 3325 __ subcc(G0, Rlow, dest->as_register_lo());
duke@435 3326 __ subc (G0, Rhi, dest->as_register_hi());
duke@435 3327 #endif
duke@435 3328 }
duke@435 3329 }
duke@435 3330
duke@435 3331
duke@435 3332 void LIR_Assembler::fxch(int i) {
duke@435 3333 Unimplemented();
duke@435 3334 }
duke@435 3335
duke@435 3336 void LIR_Assembler::fld(int i) {
duke@435 3337 Unimplemented();
duke@435 3338 }
duke@435 3339
duke@435 3340 void LIR_Assembler::ffree(int i) {
duke@435 3341 Unimplemented();
duke@435 3342 }
duke@435 3343
duke@435 3344 void LIR_Assembler::rt_call(LIR_Opr result, address dest,
duke@435 3345 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
duke@435 3346
duke@435 3347 // if tmp is invalid, then the function being called doesn't destroy the thread
duke@435 3348 if (tmp->is_valid()) {
duke@435 3349 __ save_thread(tmp->as_register());
duke@435 3350 }
duke@435 3351 __ call(dest, relocInfo::runtime_call_type);
duke@435 3352 __ delayed()->nop();
duke@435 3353 if (info != NULL) {
duke@435 3354 add_call_info_here(info);
duke@435 3355 }
duke@435 3356 if (tmp->is_valid()) {
duke@435 3357 __ restore_thread(tmp->as_register());
duke@435 3358 }
duke@435 3359
duke@435 3360 #ifdef ASSERT
duke@435 3361 __ verify_thread();
duke@435 3362 #endif // ASSERT
duke@435 3363 }
duke@435 3364
duke@435 3365
duke@435 3366 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
duke@435 3367 #ifdef _LP64
duke@435 3368 ShouldNotReachHere();
duke@435 3369 #endif
duke@435 3370
duke@435 3371 NEEDS_CLEANUP;
duke@435 3372 if (type == T_LONG) {
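    // On 32-bit SPARC a Java long occupies two registers, so a two-word
    // move would not be atomic. Pack the halves into a single register
    // and use one 64-bit ldx/stx so the volatile access stays atomic.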
duke@435 3373 LIR_Address* mem_addr = dest->is_address() ? dest->as_address_ptr() : src->as_address_ptr();
duke@435 3374
duke@435 3375 // (extended to allow indexed as well as constant displaced for JSR-166)
duke@435 3376 Register idx = noreg; // contains either constant offset or index
duke@435 3377
duke@435 3378 int disp = mem_addr->disp();
duke@435 3379 if (mem_addr->index() == LIR_OprFact::illegalOpr) {
duke@435 3380 if (!Assembler::is_simm13(disp)) {
duke@435 3381 idx = O7;
duke@435 3382 __ set(disp, idx);
duke@435 3383 }
duke@435 3384 } else {
duke@435 3385 assert(disp == 0, "not both indexed and disp");
duke@435 3386 idx = mem_addr->index()->as_register();
duke@435 3387 }
duke@435 3388
duke@435 3389 int null_check_offset = -1;
duke@435 3390
duke@435 3391 Register base = mem_addr->base()->as_register();
duke@435 3392 if (src->is_register() && dest->is_address()) {
duke@435 3393 // G4 is high half, G5 is low half
morris@5283 3394 // clear the top bits of G5, and scale up G4
morris@5283 3395 __ srl (src->as_register_lo(), 0, G5);
morris@5283 3396 __ sllx(src->as_register_hi(), 32, G4);
morris@5283 3397 // combine the two halves into the 64 bits of G4
morris@5283 3398 __ or3(G4, G5, G4);
morris@5283 3399 null_check_offset = __ offset();
morris@5283 3400 if (idx == noreg) {
morris@5283 3401 __ stx(G4, base, disp);
duke@435 3402 } else {
morris@5283 3403 __ stx(G4, base, idx);
duke@435 3404 }
duke@435 3405 } else if (src->is_address() && dest->is_register()) {
duke@435 3406 null_check_offset = __ offset();
morris@5283 3407 if (idx == noreg) {
morris@5283 3408 __ ldx(base, disp, G5);
duke@435 3409 } else {
morris@5283 3410 __ ldx(base, idx, G5);
duke@435 3411 }
morris@5283 3412 __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
morris@5283 3413 __ mov (G5, dest->as_register_lo()); // copy low half into lo
duke@435 3414 } else {
duke@435 3415 Unimplemented();
duke@435 3416 }
duke@435 3417 if (info != NULL) {
duke@435 3418 add_debug_info_for_null_check(null_check_offset, info);
duke@435 3419 }
duke@435 3420
duke@435 3421 } else {
duke@435 3422 // use normal move for all other volatiles since they don't need
duke@435 3423 // special handling to remain atomic.
iveresov@2344 3424 move_op(src, dest, type, lir_patch_none, info, false, false, false);
duke@435 3425 }
duke@435 3426 }
duke@435 3427
duke@435 3428 void LIR_Assembler::membar() {
duke@435 3429   // Only StoreLoad membars are ever explicitly needed on SPARC in TSO mode.
duke@435 3430   __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
duke@435 3431 }
duke@435 3432
duke@435 3433 void LIR_Assembler::membar_acquire() {
duke@435 3434 // no-op on TSO
duke@435 3435 }
duke@435 3436
duke@435 3437 void LIR_Assembler::membar_release() {
duke@435 3438 // no-op on TSO
duke@435 3439 }
duke@435 3440
jiangli@3592 3441 void LIR_Assembler::membar_loadload() {
jiangli@3592 3442 // no-op
jiangli@3592 3443 //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
jiangli@3592 3444 }
jiangli@3592 3445
jiangli@3592 3446 void LIR_Assembler::membar_storestore() {
jiangli@3592 3447 // no-op
jiangli@3592 3448 //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
jiangli@3592 3449 }
jiangli@3592 3450
jiangli@3592 3451 void LIR_Assembler::membar_loadstore() {
jiangli@3592 3452 // no-op
jiangli@3592 3453 //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
jiangli@3592 3454 }
jiangli@3592 3455
jiangli@3592 3456 void LIR_Assembler::membar_storeload() {
jiangli@3592 3457 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
jiangli@3592 3458 }
jiangli@3592 3459
jiangli@3592 3460
iveresov@2138 3461 // Pack two sequential registers containing 32 bit values
duke@435 3462 // into a single 64 bit register.
iveresov@2138 3463 // src and src->successor() are packed into dst
iveresov@2138 3464 // src and dst may be the same register.
iveresov@2138 3465 // Note: src is destroyed
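// e.g. src = (hi 0x00000001, lo 0x00000002) packs to dst = 0x0000000100000002.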
iveresov@2138 3466 void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) {
iveresov@2138 3467 Register rs = src->as_register();
iveresov@2138 3468 Register rd = dst->as_register_lo();
duke@435 3469 __ sllx(rs, 32, rs);
duke@435 3470 __ srl(rs->successor(), 0, rs->successor());
duke@435 3471 __ or3(rs, rs->successor(), rd);
duke@435 3472 }
duke@435 3473
iveresov@2138 3474 // Unpack a 64 bit value in a register into
duke@435 3475 // two sequential registers.
iveresov@2138 3476 // src is unpacked into dst and dst->successor()
iveresov@2138 3477 void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) {
iveresov@2138 3478 Register rs = src->as_register_lo();
iveresov@2138 3479 Register rd = dst->as_register_hi();
iveresov@2138 3480 assert_different_registers(rs, rd, rd->successor());
iveresov@2138 3481 __ srlx(rs, 32, rd);
iveresov@2138 3482 __ srl (rs, 0, rd->successor());
duke@435 3483 }
duke@435 3484
duke@435 3485
duke@435 3486 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
duke@435 3487 LIR_Address* addr = addr_opr->as_address_ptr();
roland@6223 3488 assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1, "can't handle complex addresses yet");
roland@6223 3489
roland@6223 3490 if (Assembler::is_simm13(addr->disp())) {
roland@6223 3491 __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register());
roland@6223 3492 } else {
roland@6223 3493 __ set(addr->disp(), G3_scratch);
roland@6223 3494 __ add(addr->base()->as_pointer_register(), G3_scratch, dest->as_pointer_register());
roland@6223 3495 }
duke@435 3496 }
duke@435 3497
duke@435 3498
duke@435 3499 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
duke@435 3500 assert(result_reg->is_register(), "check");
duke@435 3501 __ mov(G2_thread, result_reg->as_register());
duke@435 3502 }
duke@435 3503
roland@4860 3504 #ifdef ASSERT
roland@4860 3505 // emit run-time assertion
roland@4860 3506 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
roland@4860 3507 assert(op->code() == lir_assert, "must be");
roland@4860 3508
roland@4860 3509 if (op->in_opr1()->is_valid()) {
roland@4860 3510 assert(op->in_opr2()->is_valid(), "both operands must be valid");
roland@4860 3511 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
roland@4860 3512 } else {
roland@4860 3513 assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
roland@4860 3514 assert(op->condition() == lir_cond_always, "no other conditions allowed");
roland@4860 3515 }
roland@4860 3516
roland@4860 3517 Label ok;
roland@4860 3518 if (op->condition() != lir_cond_always) {
roland@4860 3519 Assembler::Condition acond;
roland@4860 3520 switch (op->condition()) {
roland@4860 3521 case lir_cond_equal: acond = Assembler::equal; break;
roland@4860 3522 case lir_cond_notEqual: acond = Assembler::notEqual; break;
roland@4860 3523 case lir_cond_less: acond = Assembler::less; break;
roland@4860 3524 case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
roland@4860 3525 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
roland@4860 3526 case lir_cond_greater: acond = Assembler::greater; break;
roland@4860 3527 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break;
roland@4860 3528 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break;
roland@4860 3529 default: ShouldNotReachHere();
roland@4860 3530 };
roland@4860 3531 __ br(acond, false, Assembler::pt, ok);
roland@4860 3532 __ delayed()->nop();
roland@4860 3533 }
roland@4860 3534 if (op->halt()) {
roland@4860 3535 const char* str = __ code_string(op->msg());
roland@4860 3536 __ stop(str);
roland@4860 3537 } else {
roland@4860 3538 breakpoint();
roland@4860 3539 }
roland@4860 3540 __ bind(ok);
roland@4860 3541 }
roland@4860 3542 #endif
duke@435 3543
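// SPARC branches and calls have a delay slot: the instruction that
// follows the transfer is executed before the transfer takes effect.
// The peephole below tries to hoist the single instruction preceding a
// branch/call into that slot instead of filling it with a nop, e.g.
//
//   add ...          becomes      ba  L
//   ba  L                         delayed()->add ...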
duke@435 3544 void LIR_Assembler::peephole(LIR_List* lir) {
duke@435 3545 LIR_OpList* inst = lir->instructions_list();
duke@435 3546 for (int i = 0; i < inst->length(); i++) {
duke@435 3547 LIR_Op* op = inst->at(i);
duke@435 3548 switch (op->code()) {
duke@435 3549 case lir_cond_float_branch:
duke@435 3550 case lir_branch: {
duke@435 3551 LIR_OpBranch* branch = op->as_OpBranch();
duke@435 3552 assert(branch->info() == NULL, "shouldn't be state on branches anymore");
duke@435 3553 LIR_Op* delay_op = NULL;
duke@435 3554 // we'd like to be able to pull following instructions into
duke@435 3555 // this slot but we don't know enough to do it safely yet so
duke@435 3556 // only optimize block to block control flow.
duke@435 3557 if (LIRFillDelaySlots && branch->block()) {
duke@435 3558 LIR_Op* prev = inst->at(i - 1);
duke@435 3559 if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) {
duke@435 3560 // swap previous instruction into delay slot
duke@435 3561 inst->at_put(i - 1, op);
duke@435 3562 inst->at_put(i, new LIR_OpDelay(prev, op->info()));
duke@435 3563 #ifndef PRODUCT
duke@435 3564 if (LIRTracePeephole) {
duke@435 3565 tty->print_cr("delayed");
duke@435 3566 inst->at(i - 1)->print();
duke@435 3567 inst->at(i)->print();
twisti@1919 3568 tty->cr();
duke@435 3569 }
duke@435 3570 #endif
duke@435 3571 continue;
duke@435 3572 }
duke@435 3573 }
duke@435 3574
duke@435 3575 if (!delay_op) {
duke@435 3576 delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL);
duke@435 3577 }
duke@435 3578 inst->insert_before(i + 1, delay_op);
duke@435 3579 break;
duke@435 3580 }
duke@435 3581 case lir_static_call:
duke@435 3582 case lir_virtual_call:
duke@435 3583 case lir_icvirtual_call:
twisti@1919 3584 case lir_optvirtual_call:
twisti@1919 3585 case lir_dynamic_call: {
duke@435 3586 LIR_Op* prev = inst->at(i - 1);
duke@435 3587 if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
duke@435 3588 (op->code() != lir_virtual_call ||
duke@435 3589 !prev->result_opr()->is_single_cpu() ||
duke@435 3590 prev->result_opr()->as_register() != O0) &&
duke@435 3591 LIR_Assembler::is_single_instruction(prev)) {
duke@435 3592 // Only moves without info can be put into the delay slot.
duke@435 3593 // Also don't allow the setup of the receiver in the delay
duke@435 3594 // slot for vtable calls.
duke@435 3595 inst->at_put(i - 1, op);
duke@435 3596 inst->at_put(i, new LIR_OpDelay(prev, op->info()));
duke@435 3597 #ifndef PRODUCT
duke@435 3598 if (LIRTracePeephole) {
duke@435 3599 tty->print_cr("delayed");
duke@435 3600 inst->at(i - 1)->print();
duke@435 3601 inst->at(i)->print();
twisti@1919 3602 tty->cr();
duke@435 3603 }
duke@435 3604 #endif
iveresov@2138 3605 } else {
iveresov@2138 3606 LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
iveresov@2138 3607 inst->insert_before(i + 1, delay_op);
iveresov@2138 3608 i++;
duke@435 3609 }
duke@435 3610
iveresov@2138 3611 #if defined(TIERED) && !defined(_LP64)
iveresov@2138 3612 // fixup the return value from G1 to O0/O1 for long returns.
iveresov@2138 3613 // It's done here instead of in LIRGenerator because there's
iveresov@2138 3614 // such a mismatch between the single reg and double reg
iveresov@2138 3615 // calling convention.
iveresov@2138 3616 LIR_OpJavaCall* callop = op->as_OpJavaCall();
iveresov@2138 3617 if (callop->result_opr() == FrameMap::out_long_opr) {
iveresov@2138 3618 LIR_OpJavaCall* call;
iveresov@2138 3619 LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
iveresov@2138 3620         for (int a = 0; a < callop->arguments()->length(); a++) {
iveresov@2138 3621           arguments->append(callop->arguments()->at(a));
iveresov@2138 3622 }
iveresov@2138 3623 if (op->code() == lir_virtual_call) {
iveresov@2138 3624 call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
iveresov@2138 3625 callop->vtable_offset(), arguments, callop->info());
iveresov@2138 3626 } else {
iveresov@2138 3627 call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
iveresov@2138 3628 callop->addr(), arguments, callop->info());
iveresov@2138 3629 }
iveresov@2138 3630 inst->at_put(i - 1, call);
iveresov@2138 3631 inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
iveresov@2138 3632 T_LONG, lir_patch_none, NULL));
iveresov@2138 3633 }
iveresov@2138 3634 #endif
duke@435 3635 break;
duke@435 3636 }
duke@435 3637 }
duke@435 3638 }
duke@435 3639 }
duke@435 3640
roland@4106 3641 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
roland@4106 3642 LIR_Address* addr = src->as_address_ptr();
roland@4106 3643
roland@4106 3644 assert(data == dest, "swap uses only 2 operands");
roland@4106 3645 assert (code == lir_xchg, "no xadd on sparc");
roland@4106 3646
roland@4106 3647 if (data->type() == T_INT) {
roland@4106 3648 __ swap(as_Address(addr), data->as_register());
roland@4106 3649 } else if (data->is_oop()) {
roland@4106 3650 Register obj = data->as_register();
roland@4106 3651 Register narrow = tmp->as_register();
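    // The SPARC swap instruction exchanges only 32 bits, so on 64-bit the
    // oop is compressed to a narrow oop for the exchange and decoded back
    // into the register afterwards.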
roland@4106 3652 #ifdef _LP64
roland@4106 3653 assert(UseCompressedOops, "swap is 32bit only");
roland@4106 3654 __ encode_heap_oop(obj, narrow);
roland@4106 3655 __ swap(as_Address(addr), narrow);
roland@4106 3656 __ decode_heap_oop(narrow, obj);
roland@4106 3657 #else
roland@4106 3658 __ swap(as_Address(addr), obj);
roland@4106 3659 #endif
roland@4106 3660 } else {
roland@4106 3661 ShouldNotReachHere();
roland@4106 3662 }
roland@4106 3663 }
duke@435 3664
duke@435 3665 #undef __
