src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp

author:      coleenp
date:        Tue, 11 Sep 2012 20:20:38 -0400
changeset:   4052:75f33eecc1b3
parent:      4051:8a02ca5e5576
child:       4106:7eca5de9e0b6
permissions: -rw-r--r--

7196681: NPG: Some JSR 292 tests crash in Windows exception handler
Summary: There was a rogue os::breakpoint() call in log_dependency left over from the jsr292 merge. Also changed verify_oop() calls for metadata to verify_{method,klass}_ptr.
Reviewed-by: kvn, twisti

duke@435 1 /*
kvn@3760 2 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "c1/c1_Compilation.hpp"
stefank@2314 27 #include "c1/c1_LIRAssembler.hpp"
stefank@2314 28 #include "c1/c1_MacroAssembler.hpp"
stefank@2314 29 #include "c1/c1_Runtime1.hpp"
stefank@2314 30 #include "c1/c1_ValueStack.hpp"
stefank@2314 31 #include "ci/ciArrayKlass.hpp"
stefank@2314 32 #include "ci/ciInstance.hpp"
stefank@2314 33 #include "gc_interface/collectedHeap.hpp"
stefank@2314 34 #include "memory/barrierSet.hpp"
stefank@2314 35 #include "memory/cardTableModRefBS.hpp"
stefank@2314 36 #include "nativeInst_sparc.hpp"
stefank@2314 37 #include "oops/objArrayKlass.hpp"
stefank@2314 38 #include "runtime/sharedRuntime.hpp"
duke@435 39
duke@435 40 #define __ _masm->
duke@435 41
duke@435 42
duke@435 43 //------------------------------------------------------------
duke@435 44
duke@435 45
duke@435 46 bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
duke@435 47 if (opr->is_constant()) {
duke@435 48 LIR_Const* constant = opr->as_constant_ptr();
duke@435 49 switch (constant->type()) {
duke@435 50 case T_INT: {
duke@435 51 jint value = constant->as_jint();
duke@435 52 return Assembler::is_simm13(value);
duke@435 53 }
duke@435 54
duke@435 55 default:
duke@435 56 return false;
duke@435 57 }
duke@435 58 }
duke@435 59 return false;
duke@435 60 }
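Assembler::is_simm13() asks whether the constant fits SPARC's 13-bit signed immediate field, which is what makes it "small" enough to fold directly into an instruction. A minimal sketch of that range check (hypothetical helper, not the real Assembler routine):

// Sketch only: a simm13 is a signed 13-bit immediate, i.e. -4096..4095.
static bool fits_simm13(intptr_t value) {
  return value >= -4096 && value <= 4095;
}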
duke@435 61
duke@435 62
duke@435 63 bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
duke@435 64 switch (op->code()) {
duke@435 65 case lir_null_check:
duke@435 66 return true;
duke@435 67
duke@435 68
duke@435 69 case lir_add:
duke@435 70 case lir_ushr:
duke@435 71 case lir_shr:
duke@435 72 case lir_shl:
duke@435 73 // integer shifts and adds are always one instruction
duke@435 74 return op->result_opr()->is_single_cpu();
duke@435 75
duke@435 76
duke@435 77 case lir_move: {
duke@435 78 LIR_Op1* op1 = op->as_Op1();
duke@435 79 LIR_Opr src = op1->in_opr();
duke@435 80 LIR_Opr dst = op1->result_opr();
duke@435 81
duke@435 82 if (src == dst) {
duke@435 83 NEEDS_CLEANUP;
duke@435 84 // this works around a problem where moves with the same src and dst
duke@435 85 // end up in the delay slot: the assembler swallows the mov since it has
duke@435 86 // no effect, and then complains because the delay slot is empty.
duke@435 87 // Returning false stops the optimizer from putting this in
duke@435 88 // the delay slot
duke@435 89 return false;
duke@435 90 }
duke@435 91
duke@435 92 // don't put moves involving oops into the delay slot since the VerifyOops code
duke@435 93 // will make it much larger than a single instruction.
duke@435 94 if (VerifyOops) {
duke@435 95 return false;
duke@435 96 }
duke@435 97
duke@435 98 if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
duke@435 99 ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
duke@435 100 return false;
duke@435 101 }
duke@435 102
iveresov@2344 103 if (UseCompressedOops) {
iveresov@2344 104 if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false;
iveresov@2344 105 if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
iveresov@2344 106 }
iveresov@2344 107
duke@435 108 if (dst->is_register()) {
duke@435 109 if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
duke@435 110 return !PatchALot;
duke@435 111 } else if (src->is_single_stack()) {
duke@435 112 return true;
duke@435 113 }
duke@435 114 }
duke@435 115
duke@435 116 if (src->is_register()) {
duke@435 117 if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
duke@435 118 return !PatchALot;
duke@435 119 } else if (dst->is_single_stack()) {
duke@435 120 return true;
duke@435 121 }
duke@435 122 }
duke@435 123
duke@435 124 if (dst->is_register() &&
duke@435 125 ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) ||
duke@435 126 (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) {
duke@435 127 return true;
duke@435 128 }
duke@435 129
duke@435 130 return false;
duke@435 131 }
duke@435 132
duke@435 133 default:
duke@435 134 return false;
duke@435 135 }
duke@435 136 ShouldNotReachHere();
duke@435 137 }
duke@435 138
duke@435 139
duke@435 140 LIR_Opr LIR_Assembler::receiverOpr() {
duke@435 141 return FrameMap::O0_oop_opr;
duke@435 142 }
duke@435 143
duke@435 144
duke@435 145 LIR_Opr LIR_Assembler::osrBufferPointer() {
duke@435 146 return FrameMap::I0_opr;
duke@435 147 }
duke@435 148
duke@435 149
duke@435 150 int LIR_Assembler::initial_frame_size_in_bytes() {
duke@435 151 return in_bytes(frame_map()->framesize_in_bytes());
duke@435 152 }
duke@435 153
duke@435 154
duke@435 155 // inline cache check: the inline cached class is in G5_inline_cache_reg(G5);
duke@435 156 // we fetch the class of the receiver (O0) and compare it with the cached class.
duke@435 157 // If they do not match we jump to slow case.
duke@435 158 int LIR_Assembler::check_icache() {
duke@435 159 int offset = __ offset();
duke@435 160 __ inline_cache_check(O0, G5_inline_cache_reg);
duke@435 161 return offset;
duke@435 162 }
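Conceptually, inline_cache_check loads the receiver's klass and compares it against the klass the call site was last bound to; on a mismatch it branches to the IC miss handler, which repatches the site. A hedged sketch of that idea (hypothetical names, not the MacroAssembler code):

// Sketch of the inline-cache fast path, with hypothetical Object/Klass types.
void* ic_dispatch(Object* receiver, Klass* cached_klass, void* cached_target) {
  if (receiver->klass() == cached_klass) {
    return cached_target;            // fast path: receiver class matches the cache
  }
  return ic_miss_handler(receiver);  // slow path: repatch or go megamorphic
}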
duke@435 163
duke@435 164
duke@435 165 void LIR_Assembler::osr_entry() {
duke@435 166 // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
duke@435 167 //
duke@435 168 // 1. Create a new compiled activation.
duke@435 169 // 2. Initialize local variables in the compiled activation. The expression stack must be empty
duke@435 170 // at the osr_bci; it is not initialized.
duke@435 171 // 3. Jump to the continuation address in compiled code to resume execution.
duke@435 172
duke@435 173 // OSR entry point
duke@435 174 offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
duke@435 175 BlockBegin* osr_entry = compilation()->hir()->osr_entry();
duke@435 176 ValueStack* entry_state = osr_entry->end()->state();
duke@435 177 int number_of_locks = entry_state->locks_size();
duke@435 178
duke@435 179 // Create a frame for the compiled activation.
duke@435 180 __ build_frame(initial_frame_size_in_bytes());
duke@435 181
duke@435 182 // OSR buffer is
duke@435 183 //
duke@435 184 // locals[nlocals-1..0]
duke@435 185 // monitors[number_of_locks-1..0]
duke@435 186 //
duke@435 187 // locals is a direct copy of the interpreter frame, so the first slot
duke@435 188 // in the osr buffer's local array is the last local from the interpreter
duke@435 189 // and the last slot is local[0] (receiver) from the interpreter
duke@435 190 //
duke@435 191 // Similarly with locks. The first lock slot in the osr buffer is the nth lock
duke@435 192 // from the interpreter frame, and the last lock slot in the osr buffer is the
duke@435 193 // 0th lock in the interpreter frame (the method lock if a sync method)
duke@435 194
duke@435 195 // Initialize monitors in the compiled activation.
duke@435 196 // I0: pointer to osr buffer
duke@435 197 //
duke@435 198 // All other registers are dead at this point and the locals will be
duke@435 199 // copied into place by code emitted in the IR.
duke@435 200
duke@435 201 Register OSR_buf = osrBufferPointer()->as_register();
duke@435 202 { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
duke@435 203 int monitor_offset = BytesPerWord * method()->max_locals() +
roland@1495 204 (2 * BytesPerWord) * (number_of_locks - 1);
roland@1495 205 // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
roland@1495 206 // the OSR buffer using 2 word entries: first the lock and then
roland@1495 207 // the oop.
duke@435 208 for (int i = 0; i < number_of_locks; i++) {
roland@1495 209 int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
duke@435 210 #ifdef ASSERT
duke@435 211 // verify the interpreter's monitor has a non-null object
duke@435 212 {
duke@435 213 Label L;
roland@1495 214 __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
kvn@3037 215 __ cmp_and_br_short(O7, G0, Assembler::notEqual, Assembler::pt, L);
duke@435 216 __ stop("locked object is NULL");
duke@435 217 __ bind(L);
duke@435 218 }
duke@435 219 #endif // ASSERT
duke@435 220 // Copy the lock field into the compiled activation.
roland@1495 221 __ ld_ptr(OSR_buf, slot_offset + 0, O7);
duke@435 222 __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
roland@1495 223 __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
duke@435 224 __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
duke@435 225 }
duke@435 226 }
duke@435 227 }
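The loop above addresses the OSR buffer exactly as the layout comment describes: max_locals() words of locals first, then two-word (lock, oop) monitor entries, walked from the highest-numbered monitor down. The same offset arithmetic as a sketch:

// Sketch: byte offset of monitor i's BasicLock word in the OSR buffer,
// assuming max_locals words of locals followed by 2-word monitor entries;
// the matching oop sits one word (BytesPerWord) after the lock.
int monitor_lock_offset_in_osr_buf(int max_locals, int number_of_locks, int i) {
  int monitor_base = BytesPerWord * max_locals +
                     (2 * BytesPerWord) * (number_of_locks - 1);
  return monitor_base - (i * 2) * BytesPerWord;
}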
duke@435 228
duke@435 229
duke@435 230 // Optimized Library calls
duke@435 231 // This is the fast version of java.lang.String.compare; it has not
duke@435 232 // OSR-entry and therefore, we generate a slow version for OSR's
duke@435 233 void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) {
duke@435 234 Register str0 = left->as_register();
duke@435 235 Register str1 = right->as_register();
duke@435 236
duke@435 237 Label Ldone;
duke@435 238
duke@435 239 Register result = dst->as_register();
duke@435 240 {
kvn@3760 241 // Get a pointer to the first character of string0 in tmp0
kvn@3760 242 // and get string0.length() in str0
kvn@3760 243 // Get a pointer to the first character of string1 in tmp1
kvn@3760 244 // and get string1.length() in str1
kvn@3760 245 // Also, get string0.length()-string1.length() in
kvn@3760 246 // o7 and get the condition code set
duke@435 247 // Note: some instructions have been hoisted for better instruction scheduling
duke@435 248
duke@435 249 Register tmp0 = L0;
duke@435 250 Register tmp1 = L1;
duke@435 251 Register tmp2 = L2;
duke@435 252
duke@435 253 int value_offset = java_lang_String:: value_offset_in_bytes(); // char array
kvn@3760 254 if (java_lang_String::has_offset_field()) {
kvn@3760 255 int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
kvn@3760 256 int count_offset = java_lang_String:: count_offset_in_bytes();
kvn@3760 257 __ load_heap_oop(str0, value_offset, tmp0);
kvn@3760 258 __ ld(str0, offset_offset, tmp2);
kvn@3760 259 __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
kvn@3760 260 __ ld(str0, count_offset, str0);
kvn@3760 261 __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
kvn@3760 262 } else {
kvn@3760 263 __ load_heap_oop(str0, value_offset, tmp1);
kvn@3760 264 __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
kvn@3760 265 __ ld(tmp1, arrayOopDesc::length_offset_in_bytes(), str0);
kvn@3760 266 }
duke@435 267
duke@435 268 // str1 may be null
duke@435 269 add_debug_info_for_null_check_here(info);
duke@435 270
kvn@3760 271 if (java_lang_String::has_offset_field()) {
kvn@3760 272 int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
kvn@3760 273 int count_offset = java_lang_String:: count_offset_in_bytes();
kvn@3760 274 __ load_heap_oop(str1, value_offset, tmp1);
kvn@3760 275 __ add(tmp0, tmp2, tmp0);
kvn@3760 276
kvn@3760 277 __ ld(str1, offset_offset, tmp2);
kvn@3760 278 __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
kvn@3760 279 __ ld(str1, count_offset, str1);
kvn@3760 280 __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
kvn@3760 281 __ add(tmp1, tmp2, tmp1);
kvn@3760 282 } else {
kvn@3760 283 __ load_heap_oop(str1, value_offset, tmp2);
kvn@3760 284 __ add(tmp2, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
kvn@3760 285 __ ld(tmp2, arrayOopDesc::length_offset_in_bytes(), str1);
kvn@3760 286 }
duke@435 287 __ subcc(str0, str1, O7);
duke@435 288 }
duke@435 289
duke@435 290 {
duke@435 291 // Compute the minimum of the string lengths, scale it and store it in limit
duke@435 292 Register count0 = I0;
duke@435 293 Register count1 = I1;
duke@435 294 Register limit = L3;
duke@435 295
duke@435 296 Label Lskip;
duke@435 297 __ sll(count0, exact_log2(sizeof(jchar)), limit); // string0 is shorter
duke@435 298 __ br(Assembler::greater, true, Assembler::pt, Lskip);
duke@435 299 __ delayed()->sll(count1, exact_log2(sizeof(jchar)), limit); // string1 is shorter
duke@435 300 __ bind(Lskip);
duke@435 301
duke@435 302 // If either string is empty (or both of them) the result is the difference in lengths
duke@435 303 __ cmp(limit, 0);
duke@435 304 __ br(Assembler::equal, true, Assembler::pn, Ldone);
duke@435 305 __ delayed()->mov(O7, result); // result is difference in lengths
duke@435 306 }
duke@435 307
duke@435 308 {
duke@435 309 // Neither string is empty
duke@435 310 Label Lloop;
duke@435 311
duke@435 312 Register base0 = L0;
duke@435 313 Register base1 = L1;
duke@435 314 Register chr0 = I0;
duke@435 315 Register chr1 = I1;
duke@435 316 Register limit = L3;
duke@435 317
duke@435 318 // Shift base0 and base1 to the end of the arrays, negate limit
duke@435 319 __ add(base0, limit, base0);
duke@435 320 __ add(base1, limit, base1);
kvn@3760 321 __ neg(limit); // limit = -min{string0.length(), string1.length()}
duke@435 322
duke@435 323 __ lduh(base0, limit, chr0);
duke@435 324 __ bind(Lloop);
duke@435 325 __ lduh(base1, limit, chr1);
duke@435 326 __ subcc(chr0, chr1, chr0);
duke@435 327 __ br(Assembler::notZero, false, Assembler::pn, Ldone);
duke@435 328 assert(chr0 == result, "result must be pre-placed");
duke@435 329 __ delayed()->inccc(limit, sizeof(jchar));
duke@435 330 __ br(Assembler::notZero, true, Assembler::pt, Lloop);
duke@435 331 __ delayed()->lduh(base0, limit, chr0);
duke@435 332 }
duke@435 333
duke@435 334 // If strings are equal up to min length, return the length difference.
duke@435 335 __ mov(O7, result);
duke@435 336
duke@435 337 // Otherwise, return the difference between the first mismatched chars.
duke@435 338 __ bind(Ldone);
duke@435 339 }
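Stripped of register scheduling and delay slots, the emitted code is the usual compareTo loop: compare characters up to the shorter length and return the difference of the first mismatch, otherwise return the length difference. A plain C++ sketch of that logic (assuming the jchar arrays and lengths have already been extracted from the String objects):

// Sketch of the comparison the assembly above implements, not the emitted code itself.
int string_compare(const jchar* s0, int len0, const jchar* s1, int len1) {
  int min_len = len0 < len1 ? len0 : len1;
  for (int i = 0; i < min_len; i++) {
    int diff = s0[i] - s1[i];
    if (diff != 0) return diff;  // first mismatched character decides
  }
  return len0 - len1;            // equal prefixes: the length difference decides
}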
duke@435 340
duke@435 341
duke@435 342 // --------------------------------------------------------------------------------------------
duke@435 343
duke@435 344 void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {
duke@435 345 if (!GenerateSynchronizationCode) return;
duke@435 346
duke@435 347 Register obj_reg = obj_opr->as_register();
duke@435 348 Register lock_reg = lock_opr->as_register();
duke@435 349
duke@435 350 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
duke@435 351 Register reg = mon_addr.base();
duke@435 352 int offset = mon_addr.disp();
duke@435 353 // compute pointer to BasicLock
duke@435 354 if (mon_addr.is_simm13()) {
duke@435 355 __ add(reg, offset, lock_reg);
duke@435 356 }
duke@435 357 else {
duke@435 358 __ set(offset, lock_reg);
duke@435 359 __ add(reg, lock_reg, lock_reg);
duke@435 360 }
duke@435 361 // unlock object
duke@435 362 MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no);
duke@435 363 // _slow_case_stubs->append(slow_case);
duke@435 364 // temporary fix: must be created after the exception handler, therefore as a call stub
duke@435 365 _slow_case_stubs->append(slow_case);
duke@435 366 if (UseFastLocking) {
duke@435 367 // try inlined fast unlocking first, revert to slow locking if it fails
duke@435 368 // note: lock_reg points to the displaced header since the displaced header offset is 0!
duke@435 369 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
duke@435 370 __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
duke@435 371 } else {
duke@435 372 // always do slow unlocking
duke@435 373 // note: the slow unlocking code could be inlined here, however if we use
duke@435 374 // slow unlocking, speed doesn't matter anyway and this solution is
duke@435 375 // simpler and requires less duplicated code - additionally, the
duke@435 376 // slow unlocking code is the same in either case which simplifies
duke@435 377 // debugging
duke@435 378 __ br(Assembler::always, false, Assembler::pt, *slow_case->entry());
duke@435 379 __ delayed()->nop();
duke@435 380 }
duke@435 381 // done
duke@435 382 __ bind(*slow_case->continuation());
duke@435 383 }
duke@435 384
duke@435 385
twisti@1639 386 int LIR_Assembler::emit_exception_handler() {
duke@435 387 // if the last instruction is a call (typically to do a throw which
duke@435 388 // is coming at the end after block reordering) the return address
duke@435 389 // must still point into the code area in order to avoid assertion
duke@435 390 // failures when searching for the corresponding bci => add a nop
duke@435 391 // (was bug 5/14/1999 - gri)
duke@435 392 __ nop();
duke@435 393
duke@435 394 // generate code for exception handler
duke@435 395 ciMethod* method = compilation()->method();
duke@435 396
duke@435 397 address handler_base = __ start_a_stub(exception_handler_size);
duke@435 398
duke@435 399 if (handler_base == NULL) {
duke@435 400 // not enough space left for the handler
duke@435 401 bailout("exception handler overflow");
twisti@1639 402 return -1;
duke@435 403 }
twisti@1639 404
duke@435 405 int offset = code_offset();
duke@435 406
twisti@2603 407 __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
duke@435 408 __ delayed()->nop();
twisti@2603 409 __ should_not_reach_here();
iveresov@3435 410 guarantee(code_offset() - offset <= exception_handler_size, "overflow");
duke@435 411 __ end_a_stub();
twisti@1639 412
twisti@1639 413 return offset;
duke@435 414 }
duke@435 415
twisti@1639 416
never@1813 417 // Emit the code to remove the frame from the stack in the exception
never@1813 418 // unwind path.
never@1813 419 int LIR_Assembler::emit_unwind_handler() {
never@1813 420 #ifndef PRODUCT
never@1813 421 if (CommentedAssembly) {
never@1813 422 _masm->block_comment("Unwind handler");
never@1813 423 }
never@1813 424 #endif
never@1813 425
never@1813 426 int offset = code_offset();
never@1813 427
never@1813 428 // Fetch the exception from TLS and clear out exception related thread state
never@1813 429 __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
never@1813 430 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
never@1813 431 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
never@1813 432
never@1813 433 __ bind(_unwind_handler_entry);
never@1813 434 __ verify_not_null_oop(O0);
never@1813 435 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
never@1813 436 __ mov(O0, I0); // Preserve the exception
never@1813 437 }
never@1813 438
never@1813 439 // Perform needed unlocking
never@1813 440 MonitorExitStub* stub = NULL;
never@1813 441 if (method()->is_synchronized()) {
never@1813 442 monitor_address(0, FrameMap::I1_opr);
never@1813 443 stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
never@1813 444 __ unlock_object(I3, I2, I1, *stub->entry());
never@1813 445 __ bind(*stub->continuation());
never@1813 446 }
never@1813 447
never@1813 448 if (compilation()->env()->dtrace_method_probes()) {
never@2185 449 __ mov(G2_thread, O0);
roland@4051 450 __ save_thread(I1); // need to preserve thread in G2 across
roland@4051 451 // runtime call
coleenp@4037 452 metadata2reg(method()->constant_encoding(), O1);
never@1813 453 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
never@1813 454 __ delayed()->nop();
roland@4051 455 __ restore_thread(I1);
never@1813 456 }
never@1813 457
never@1813 458 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
never@1813 459 __ mov(I0, O0); // Restore the exception
never@1813 460 }
never@1813 461
never@1813 462 // dispatch to the unwind logic
never@1813 463 __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
never@1813 464 __ delayed()->nop();
never@1813 465
never@1813 466 // Emit the slow path assembly
never@1813 467 if (stub != NULL) {
never@1813 468 stub->emit_code(this);
never@1813 469 }
never@1813 470
never@1813 471 return offset;
never@1813 472 }
never@1813 473
never@1813 474
twisti@1639 475 int LIR_Assembler::emit_deopt_handler() {
duke@435 476 // if the last instruction is a call (typically to do a throw which
duke@435 477 // is coming at the end after block reordering) the return address
duke@435 478 // must still point into the code area in order to avoid assertion
duke@435 479 // failures when searching for the corresponding bci => add a nop
duke@435 480 // (was bug 5/14/1999 - gri)
duke@435 481 __ nop();
duke@435 482
duke@435 483 // generate code for deopt handler
duke@435 484 ciMethod* method = compilation()->method();
duke@435 485 address handler_base = __ start_a_stub(deopt_handler_size);
duke@435 486 if (handler_base == NULL) {
duke@435 487 // not enough space left for the handler
duke@435 488 bailout("deopt handler overflow");
twisti@1639 489 return -1;
duke@435 490 }
twisti@1639 491
duke@435 492 int offset = code_offset();
twisti@1162 493 AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
twisti@1162 494 __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
duke@435 495 __ delayed()->nop();
iveresov@3435 496 guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
duke@435 497 __ end_a_stub();
twisti@1639 498
twisti@1639 499 return offset;
duke@435 500 }
duke@435 501
duke@435 502
duke@435 503 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
duke@435 504 if (o == NULL) {
duke@435 505 __ set(NULL_WORD, reg);
duke@435 506 } else {
duke@435 507 int oop_index = __ oop_recorder()->find_index(o);
coleenp@4037 508 assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(o)), "should be real oop");
duke@435 509 RelocationHolder rspec = oop_Relocation::spec(oop_index);
duke@435 510 __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
duke@435 511 }
duke@435 512 }
duke@435 513
duke@435 514
duke@435 515 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
coleenp@4037 516 // Allocate a new index in table to hold the object once it's been patched
coleenp@4037 517 int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
coleenp@4037 518 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id, oop_index);
duke@435 519
twisti@1162 520 AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
twisti@1162 521 assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
duke@435 522 // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
duke@435 523 // NULL will be dynamically patched later and the patched value may be large. We must
duke@435 524 // therefore generate the sethi/add as placeholders
twisti@1162 525 __ patchable_set(addrlit, reg);
duke@435 526
duke@435 527 patching_epilog(patch, lir_patch_normal, reg, info);
duke@435 528 }
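patchable_set always emits the full sethi/add pair so that the patching code later has room to install an arbitrary 32-bit constant; a shorter small-constant form could not be patched in place. Roughly, the pair materializes a constant like this (a sketch of the instruction semantics, not HotSpot code):

// sethi %hi(c), reg sets the upper 22 bits; add reg, %lo(c), reg adds the low 10.
uint32_t sethi_add(uint32_t c) {
  uint32_t hi = c & ~0x3ffu;  // bits 31..10, what sethi installs
  uint32_t lo = c &  0x3ffu;  // bits 9..0, what the add contributes
  return hi + lo;             // == c, but always occupies two patchable instructions
}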
duke@435 529
duke@435 530
coleenp@4037 531 void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
coleenp@4037 532 __ set_metadata_constant(o, reg);
coleenp@4037 533 }
coleenp@4037 534
coleenp@4037 535 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
coleenp@4037 536 // Allocate a new index in table to hold the klass once it's been patched
coleenp@4037 537 int index = __ oop_recorder()->allocate_metadata_index(NULL);
coleenp@4037 538 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
coleenp@4037 539 AddressLiteral addrlit(NULL, metadata_Relocation::spec(index));
coleenp@4037 540 assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
coleenp@4037 541 // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
coleenp@4037 542 // NULL will be dynamically patched later and the patched value may be large. We must
coleenp@4037 543 // therefore generate the sethi/add as placeholders
coleenp@4037 544 __ patchable_set(addrlit, reg);
coleenp@4037 545
coleenp@4037 546 patching_epilog(patch, lir_patch_normal, reg, info);
coleenp@4037 547 }
coleenp@4037 548
duke@435 549 void LIR_Assembler::emit_op3(LIR_Op3* op) {
duke@435 550 Register Rdividend = op->in_opr1()->as_register();
duke@435 551 Register Rdivisor = noreg;
duke@435 552 Register Rscratch = op->in_opr3()->as_register();
duke@435 553 Register Rresult = op->result_opr()->as_register();
duke@435 554 int divisor = -1;
duke@435 555
duke@435 556 if (op->in_opr2()->is_register()) {
duke@435 557 Rdivisor = op->in_opr2()->as_register();
duke@435 558 } else {
duke@435 559 divisor = op->in_opr2()->as_constant_ptr()->as_jint();
duke@435 560 assert(Assembler::is_simm13(divisor), "can only handle simm13");
duke@435 561 }
duke@435 562
duke@435 563 assert(Rdividend != Rscratch, "");
duke@435 564 assert(Rdivisor != Rscratch, "");
duke@435 565 assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");
duke@435 566
duke@435 567 if (Rdivisor == noreg && is_power_of_2(divisor)) {
duke@435 568 // convert division by a power of two into some shifts and logical operations
duke@435 569 if (op->code() == lir_idiv) {
duke@435 570 if (divisor == 2) {
duke@435 571 __ srl(Rdividend, 31, Rscratch);
duke@435 572 } else {
duke@435 573 __ sra(Rdividend, 31, Rscratch);
duke@435 574 __ and3(Rscratch, divisor - 1, Rscratch);
duke@435 575 }
duke@435 576 __ add(Rdividend, Rscratch, Rscratch);
duke@435 577 __ sra(Rscratch, log2_intptr(divisor), Rresult);
duke@435 578 return;
duke@435 579 } else {
duke@435 580 if (divisor == 2) {
duke@435 581 __ srl(Rdividend, 31, Rscratch);
duke@435 582 } else {
duke@435 583 __ sra(Rdividend, 31, Rscratch);
duke@435 584 __ and3(Rscratch, divisor - 1,Rscratch);
duke@435 585 }
duke@435 586 __ add(Rdividend, Rscratch, Rscratch);
duke@435 587 __ andn(Rscratch, divisor - 1,Rscratch);
duke@435 588 __ sub(Rdividend, Rscratch, Rresult);
duke@435 589 return;
duke@435 590 }
duke@435 591 }
duke@435 592
duke@435 593 __ sra(Rdividend, 31, Rscratch);
duke@435 594 __ wry(Rscratch);
duke@435 595 if (!VM_Version::v9_instructions_work()) {
duke@435 596 // v9 doesn't require these nops
duke@435 597 __ nop();
duke@435 598 __ nop();
duke@435 599 __ nop();
duke@435 600 __ nop();
duke@435 601 }
duke@435 602
duke@435 603 add_debug_info_for_div0_here(op->info());
duke@435 604
duke@435 605 if (Rdivisor != noreg) {
duke@435 606 __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
duke@435 607 } else {
duke@435 608 assert(Assembler::is_simm13(divisor), "can only handle simm13");
duke@435 609 __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
duke@435 610 }
duke@435 611
duke@435 612 Label skip;
duke@435 613 __ br(Assembler::overflowSet, true, Assembler::pn, skip);
duke@435 614 __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
duke@435 615 __ bind(skip);
duke@435 616
duke@435 617 if (op->code() == lir_irem) {
duke@435 618 if (Rdivisor != noreg) {
duke@435 619 __ smul(Rscratch, Rdivisor, Rscratch);
duke@435 620 } else {
duke@435 621 __ smul(Rscratch, divisor, Rscratch);
duke@435 622 }
duke@435 623 __ sub(Rdividend, Rscratch, Rresult);
duke@435 624 }
duke@435 625 }
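For a power-of-two divisor the shift sequence above implements signed division that rounds toward zero: a bias of divisor-1 is added to negative dividends before the arithmetic shift, and the remainder is recovered by subtracting the rounded multiple. The same transformation as a C++ sketch:

// Sketch of the sra/and3/add/sra (idiv) and add/andn/sub (irem) sequences,
// for a divisor d = 1 << shift.
int div_pow2(int x, int shift) {
  int d    = 1 << shift;
  int bias = (x >> 31) & (d - 1);      // d-1 if x is negative, else 0
  return (x + bias) >> shift;          // arithmetic shift after biasing
}
int rem_pow2(int x, int shift) {
  int d    = 1 << shift;
  int bias = (x >> 31) & (d - 1);
  return x - ((x + bias) & ~(d - 1));  // subtract the rounded-toward-zero multiple
}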
duke@435 626
duke@435 627
duke@435 628 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
duke@435 629 #ifdef ASSERT
duke@435 630 assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
duke@435 631 if (op->block() != NULL) _branch_target_blocks.append(op->block());
duke@435 632 if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
duke@435 633 #endif
duke@435 634 assert(op->info() == NULL, "shouldn't have CodeEmitInfo");
duke@435 635
duke@435 636 if (op->cond() == lir_cond_always) {
duke@435 637 __ br(Assembler::always, false, Assembler::pt, *(op->label()));
duke@435 638 } else if (op->code() == lir_cond_float_branch) {
duke@435 639 assert(op->ublock() != NULL, "must have unordered successor");
duke@435 640 bool is_unordered = (op->ublock() == op->block());
duke@435 641 Assembler::Condition acond;
duke@435 642 switch (op->cond()) {
duke@435 643 case lir_cond_equal: acond = Assembler::f_equal; break;
duke@435 644 case lir_cond_notEqual: acond = Assembler::f_notEqual; break;
duke@435 645 case lir_cond_less: acond = (is_unordered ? Assembler::f_unorderedOrLess : Assembler::f_less); break;
duke@435 646 case lir_cond_greater: acond = (is_unordered ? Assembler::f_unorderedOrGreater : Assembler::f_greater); break;
duke@435 647 case lir_cond_lessEqual: acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual : Assembler::f_lessOrEqual); break;
duke@435 648 case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
duke@435 649 default : ShouldNotReachHere();
duke@435 650 };
duke@435 651
duke@435 652 if (!VM_Version::v9_instructions_work()) {
duke@435 653 __ nop();
duke@435 654 }
duke@435 655 __ fb( acond, false, Assembler::pn, *(op->label()));
duke@435 656 } else {
duke@435 657 assert (op->code() == lir_branch, "just checking");
duke@435 658
duke@435 659 Assembler::Condition acond;
duke@435 660 switch (op->cond()) {
duke@435 661 case lir_cond_equal: acond = Assembler::equal; break;
duke@435 662 case lir_cond_notEqual: acond = Assembler::notEqual; break;
duke@435 663 case lir_cond_less: acond = Assembler::less; break;
duke@435 664 case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
duke@435 665 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
duke@435 666 case lir_cond_greater: acond = Assembler::greater; break;
duke@435 667 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break;
duke@435 668 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break;
duke@435 669 default: ShouldNotReachHere();
duke@435 670 };
duke@435 671
duke@435 672 // sparc has different condition codes for testing 32-bit
duke@435 673 // vs. 64-bit values. We could always test xcc if we could
duke@435 674 // guarantee that 32-bit loads are always sign extended, but that
duke@435 675 // isn't true, and since sign extension isn't free, it would impose a
duke@435 676 // slight cost.
duke@435 677 #ifdef _LP64
duke@435 678 if (op->type() == T_INT) {
duke@435 679 __ br(acond, false, Assembler::pn, *(op->label()));
duke@435 680 } else
duke@435 681 #endif
duke@435 682 __ brx(acond, false, Assembler::pn, *(op->label()));
duke@435 683 }
duke@435 684 // The peephole pass fills the delay slot
duke@435 685 }
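The icc/xcc distinction matters because a 32-bit value sitting in a 64-bit register may carry stale or non-sign-extended upper bits; branching on the 64-bit condition codes would then disagree with the 32-bit comparison the bytecode asked for. A tiny illustration of the hazard:

// Sketch: why branching on xcc can mislead for T_INT data.
int64_t a = 0x00000000FFFFFFFFLL;  // the 32-bit value -1 loaded without sign extension
int64_t b = 0;
// 64-bit view (xcc): a > b.  32-bit view (icc, what T_INT needs): (int32_t)a < (int32_t)b.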
duke@435 686
duke@435 687
duke@435 688 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
duke@435 689 Bytecodes::Code code = op->bytecode();
duke@435 690 LIR_Opr dst = op->result_opr();
duke@435 691
duke@435 692 switch(code) {
duke@435 693 case Bytecodes::_i2l: {
duke@435 694 Register rlo = dst->as_register_lo();
duke@435 695 Register rhi = dst->as_register_hi();
duke@435 696 Register rval = op->in_opr()->as_register();
duke@435 697 #ifdef _LP64
duke@435 698 __ sra(rval, 0, rlo);
duke@435 699 #else
duke@435 700 __ mov(rval, rlo);
duke@435 701 __ sra(rval, BitsPerInt-1, rhi);
duke@435 702 #endif
duke@435 703 break;
duke@435 704 }
duke@435 705 case Bytecodes::_i2d:
duke@435 706 case Bytecodes::_i2f: {
duke@435 707 bool is_double = (code == Bytecodes::_i2d);
duke@435 708 FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
duke@435 709 FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
duke@435 710 FloatRegister rsrc = op->in_opr()->as_float_reg();
duke@435 711 if (rsrc != rdst) {
duke@435 712 __ fmov(FloatRegisterImpl::S, rsrc, rdst);
duke@435 713 }
duke@435 714 __ fitof(w, rdst, rdst);
duke@435 715 break;
duke@435 716 }
duke@435 717 case Bytecodes::_f2i:{
duke@435 718 FloatRegister rsrc = op->in_opr()->as_float_reg();
duke@435 719 Address addr = frame_map()->address_for_slot(dst->single_stack_ix());
duke@435 720 Label L;
duke@435 721 // result must be 0 if value is NaN; test by comparing value to itself
duke@435 722 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
duke@435 723 if (!VM_Version::v9_instructions_work()) {
duke@435 724 __ nop();
duke@435 725 }
duke@435 726 __ fb(Assembler::f_unordered, true, Assembler::pn, L);
duke@435 727 __ delayed()->st(G0, addr); // annulled if the contents of rsrc are not NaN
duke@435 728 __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
duke@435 729 // move integer result from float register to int register
duke@435 730 __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
duke@435 731 __ bind (L);
duke@435 732 break;
duke@435 733 }
duke@435 734 case Bytecodes::_l2i: {
duke@435 735 Register rlo = op->in_opr()->as_register_lo();
duke@435 736 Register rhi = op->in_opr()->as_register_hi();
duke@435 737 Register rdst = dst->as_register();
duke@435 738 #ifdef _LP64
duke@435 739 __ sra(rlo, 0, rdst);
duke@435 740 #else
duke@435 741 __ mov(rlo, rdst);
duke@435 742 #endif
duke@435 743 break;
duke@435 744 }
duke@435 745 case Bytecodes::_d2f:
duke@435 746 case Bytecodes::_f2d: {
duke@435 747 bool is_double = (code == Bytecodes::_f2d);
duke@435 748 assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
duke@435 749 LIR_Opr val = op->in_opr();
duke@435 750 FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
duke@435 751 FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
duke@435 752 FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
duke@435 753 FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
duke@435 754 __ ftof(vw, dw, rval, rdst);
duke@435 755 break;
duke@435 756 }
duke@435 757 case Bytecodes::_i2s:
duke@435 758 case Bytecodes::_i2b: {
duke@435 759 Register rval = op->in_opr()->as_register();
duke@435 760 Register rdst = dst->as_register();
duke@435 761 int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
duke@435 762 __ sll (rval, shift, rdst);
duke@435 763 __ sra (rdst, shift, rdst);
duke@435 764 break;
duke@435 765 }
duke@435 766 case Bytecodes::_i2c: {
duke@435 767 Register rval = op->in_opr()->as_register();
duke@435 768 Register rdst = dst->as_register();
duke@435 769 int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte;
duke@435 770 __ sll (rval, shift, rdst);
duke@435 771 __ srl (rdst, shift, rdst);
duke@435 772 break;
duke@435 773 }
duke@435 774
duke@435 775 default: ShouldNotReachHere();
duke@435 776 }
duke@435 777 }
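The _i2b/_i2s/_i2c cases use a shift pair: shift left so the target width occupies the top of the register, then shift right arithmetically (sign extension for byte and short) or logically (zero extension for char). Equivalent C++ as a sketch:

// Sketch of the sll/sra and sll/srl pairs used for the narrowing conversions.
jint i2b(jint x) { return (x << 24) >> 24; }                 // sign-extend the low 8 bits
jint i2s(jint x) { return (x << 16) >> 16; }                 // sign-extend the low 16 bits
jint i2c(jint x) { return (jint)((juint)(x << 16) >> 16); }  // zero-extend the low 16 bits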
duke@435 778
duke@435 779
duke@435 780 void LIR_Assembler::align_call(LIR_Code) {
duke@435 781 // do nothing since all instructions are word aligned on sparc
duke@435 782 }
duke@435 783
duke@435 784
twisti@1730 785 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
twisti@1730 786 __ call(op->addr(), rtype);
twisti@1919 787 // The peephole pass fills the delay slot, add_call_info is done in
twisti@1919 788 // LIR_Assembler::emit_delay.
duke@435 789 }
duke@435 790
duke@435 791
twisti@1730 792 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
coleenp@4037 793 __ ic_call(op->addr(), false);
twisti@1919 794 // The peephole pass fills the delay slot, add_call_info is done in
twisti@1919 795 // LIR_Assembler::emit_delay.
duke@435 796 }
duke@435 797
duke@435 798
twisti@1730 799 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
twisti@1730 800 add_debug_info_for_null_check_here(op->info());
iveresov@2344 801 __ load_klass(O0, G3_scratch);
twisti@3310 802 if (Assembler::is_simm13(op->vtable_offset())) {
twisti@1730 803 __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
duke@435 804 } else {
duke@435 805 // This will generate 2 instructions
twisti@1730 806 __ set(op->vtable_offset(), G5_method);
duke@435 807 // ld_ptr, set_hi, set
duke@435 808 __ ld_ptr(G3_scratch, G5_method, G5_method);
duke@435 809 }
coleenp@4037 810 __ ld_ptr(G5_method, Method::from_compiled_offset(), G3_scratch);
duke@435 811 __ callr(G3_scratch, G0);
duke@435 812 // the peephole pass fills the delay slot
duke@435 813 }
duke@435 814
iveresov@2344 815 int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
duke@435 816 int store_offset;
duke@435 817 if (!Assembler::is_simm13(offset + (type == T_LONG ? wordSize : 0))) { // a T_LONG store also touches offset + wordSize
duke@435 818 assert(!unaligned, "can't handle this");
duke@435 819 // for offsets larger than a simm13 we set up the offset in O7
twisti@1162 820 __ set(offset, O7);
iveresov@2344 821 store_offset = store(from_reg, base, O7, type, wide);
duke@435 822 } else {
iveresov@2344 823 if (type == T_ARRAY || type == T_OBJECT) {
iveresov@2344 824 __ verify_oop(from_reg->as_register());
iveresov@2344 825 }
duke@435 826 store_offset = code_offset();
duke@435 827 switch (type) {
duke@435 828 case T_BOOLEAN: // fall through
duke@435 829 case T_BYTE : __ stb(from_reg->as_register(), base, offset); break;
duke@435 830 case T_CHAR : __ sth(from_reg->as_register(), base, offset); break;
duke@435 831 case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
duke@435 832 case T_INT : __ stw(from_reg->as_register(), base, offset); break;
duke@435 833 case T_LONG :
duke@435 834 #ifdef _LP64
duke@435 835 if (unaligned || PatchALot) {
duke@435 836 __ srax(from_reg->as_register_lo(), 32, O7);
duke@435 837 __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
duke@435 838 __ stw(O7, base, offset + hi_word_offset_in_bytes);
duke@435 839 } else {
duke@435 840 __ stx(from_reg->as_register_lo(), base, offset);
duke@435 841 }
duke@435 842 #else
duke@435 843 assert(Assembler::is_simm13(offset + 4), "must be");
duke@435 844 __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
duke@435 845 __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
duke@435 846 #endif
duke@435 847 break;
iveresov@2344 848 case T_ADDRESS:
roland@4051 849 case T_METADATA:
iveresov@2344 850 __ st_ptr(from_reg->as_register(), base, offset);
iveresov@2344 851 break;
duke@435 852 case T_ARRAY : // fall through
iveresov@2344 853 case T_OBJECT:
iveresov@2344 854 {
iveresov@2344 855 if (UseCompressedOops && !wide) {
iveresov@2344 856 __ encode_heap_oop(from_reg->as_register(), G3_scratch);
iveresov@2344 857 store_offset = code_offset();
iveresov@2344 858 __ stw(G3_scratch, base, offset);
iveresov@2344 859 } else {
iveresov@2344 860 __ st_ptr(from_reg->as_register(), base, offset);
iveresov@2344 861 }
iveresov@2344 862 break;
iveresov@2344 863 }
iveresov@2344 864
duke@435 865 case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
duke@435 866 case T_DOUBLE:
duke@435 867 {
duke@435 868 FloatRegister reg = from_reg->as_double_reg();
duke@435 869 // split unaligned stores
duke@435 870 if (unaligned || PatchALot) {
duke@435 871 assert(Assembler::is_simm13(offset + 4), "must be");
duke@435 872 __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
duke@435 873 __ stf(FloatRegisterImpl::S, reg, base, offset);
duke@435 874 } else {
duke@435 875 __ stf(FloatRegisterImpl::D, reg, base, offset);
duke@435 876 }
duke@435 877 break;
duke@435 878 }
duke@435 879 default : ShouldNotReachHere();
duke@435 880 }
duke@435 881 }
duke@435 882 return store_offset;
duke@435 883 }
duke@435 884
duke@435 885
iveresov@2344 886 int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
iveresov@2344 887 if (type == T_ARRAY || type == T_OBJECT) {
iveresov@2344 888 __ verify_oop(from_reg->as_register());
iveresov@2344 889 }
duke@435 890 int store_offset = code_offset();
duke@435 891 switch (type) {
duke@435 892 case T_BOOLEAN: // fall through
duke@435 893 case T_BYTE : __ stb(from_reg->as_register(), base, disp); break;
duke@435 894 case T_CHAR : __ sth(from_reg->as_register(), base, disp); break;
duke@435 895 case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
duke@435 896 case T_INT : __ stw(from_reg->as_register(), base, disp); break;
duke@435 897 case T_LONG :
duke@435 898 #ifdef _LP64
duke@435 899 __ stx(from_reg->as_register_lo(), base, disp);
duke@435 900 #else
duke@435 901 assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
duke@435 902 __ std(from_reg->as_register_hi(), base, disp);
duke@435 903 #endif
duke@435 904 break;
iveresov@2344 905 case T_ADDRESS:
iveresov@2344 906 __ st_ptr(from_reg->as_register(), base, disp);
iveresov@2344 907 break;
duke@435 908 case T_ARRAY : // fall through
iveresov@2344 909 case T_OBJECT:
iveresov@2344 910 {
iveresov@2344 911 if (UseCompressedOops && !wide) {
iveresov@2344 912 __ encode_heap_oop(from_reg->as_register(), G3_scratch);
iveresov@2344 913 store_offset = code_offset();
iveresov@2344 914 __ stw(G3_scratch, base, disp);
iveresov@2344 915 } else {
iveresov@2344 916 __ st_ptr(from_reg->as_register(), base, disp);
iveresov@2344 917 }
iveresov@2344 918 break;
iveresov@2344 919 }
duke@435 920 case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break;
duke@435 921 case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
duke@435 922 default : ShouldNotReachHere();
duke@435 923 }
duke@435 924 return store_offset;
duke@435 925 }
duke@435 926
duke@435 927
iveresov@2344 928 int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
duke@435 929 int load_offset;
duke@435 930 if (!Assembler::is_simm13(offset + (type == T_LONG ? wordSize : 0))) { // a T_LONG load also touches offset + wordSize
duke@435 931 assert(base != O7, "destroying register");
duke@435 932 assert(!unaligned, "can't handle this");
duke@435 933 // for offsets larger than a simm13 we set up the offset in O7
twisti@1162 934 __ set(offset, O7);
iveresov@2344 935 load_offset = load(base, O7, to_reg, type, wide);
duke@435 936 } else {
duke@435 937 load_offset = code_offset();
duke@435 938 switch(type) {
duke@435 939 case T_BOOLEAN: // fall through
duke@435 940 case T_BYTE : __ ldsb(base, offset, to_reg->as_register()); break;
duke@435 941 case T_CHAR : __ lduh(base, offset, to_reg->as_register()); break;
duke@435 942 case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break;
duke@435 943 case T_INT : __ ld(base, offset, to_reg->as_register()); break;
duke@435 944 case T_LONG :
duke@435 945 if (!unaligned) {
duke@435 946 #ifdef _LP64
duke@435 947 __ ldx(base, offset, to_reg->as_register_lo());
duke@435 948 #else
duke@435 949 assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
duke@435 950 "must be sequential");
duke@435 951 __ ldd(base, offset, to_reg->as_register_hi());
duke@435 952 #endif
duke@435 953 } else {
duke@435 954 #ifdef _LP64
duke@435 955 assert(base != to_reg->as_register_lo(), "can't handle this");
roland@1495 956 assert(O7 != to_reg->as_register_lo(), "can't handle this");
duke@435 957 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
roland@1495 958 __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
duke@435 959 __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
roland@1495 960 __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
duke@435 961 #else
duke@435 962 if (base == to_reg->as_register_lo()) {
duke@435 963 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
duke@435 964 __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
duke@435 965 } else {
duke@435 966 __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
duke@435 967 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
duke@435 968 }
duke@435 969 #endif
duke@435 970 }
duke@435 971 break;
roland@4051 972 case T_METADATA:
iveresov@2344 973 case T_ADDRESS: __ ld_ptr(base, offset, to_reg->as_register()); break;
duke@435 974 case T_ARRAY : // fall through
iveresov@2344 975 case T_OBJECT:
iveresov@2344 976 {
iveresov@2344 977 if (UseCompressedOops && !wide) {
iveresov@2344 978 __ lduw(base, offset, to_reg->as_register());
iveresov@2344 979 __ decode_heap_oop(to_reg->as_register());
iveresov@2344 980 } else {
iveresov@2344 981 __ ld_ptr(base, offset, to_reg->as_register());
iveresov@2344 982 }
iveresov@2344 983 break;
iveresov@2344 984 }
duke@435 985 case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
duke@435 986 case T_DOUBLE:
duke@435 987 {
duke@435 988 FloatRegister reg = to_reg->as_double_reg();
duke@435 989 // split unaligned loads
duke@435 990 if (unaligned || PatchALot) {
roland@1495 991 __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
roland@1495 992 __ ldf(FloatRegisterImpl::S, base, offset, reg);
duke@435 993 } else {
duke@435 994 __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
duke@435 995 }
duke@435 996 break;
duke@435 997 }
duke@435 998 default : ShouldNotReachHere();
duke@435 999 }
iveresov@2344 1000 if (type == T_ARRAY || type == T_OBJECT) {
iveresov@2344 1001 __ verify_oop(to_reg->as_register());
iveresov@2344 1002 }
duke@435 1003 }
duke@435 1004 return load_offset;
duke@435 1005 }
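With UseCompressedOops and a non-wide access, object references are loaded as 32-bit narrow oops and decoded afterwards; the store paths encode before writing. Ignoring the NULL special case, encode/decode are just a shift by the object-alignment bits relative to the heap base. A simplified sketch assuming 8-byte alignment and a non-zero heap base:

// Simplified sketch of compressed-oop encoding, not the MacroAssembler code.
uint32_t encode_heap_oop(uintptr_t oop, uintptr_t heap_base) {
  return (uint32_t)((oop - heap_base) >> 3);    // narrow oop = heap offset / 8
}
uintptr_t decode_heap_oop(uint32_t narrow, uintptr_t heap_base) {
  return heap_base + ((uintptr_t)narrow << 3);  // back to a full pointer
}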
duke@435 1006
duke@435 1007
iveresov@2344 1008 int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
duke@435 1009 int load_offset = code_offset();
duke@435 1010 switch(type) {
duke@435 1011 case T_BOOLEAN: // fall through
iveresov@2344 1012 case T_BYTE : __ ldsb(base, disp, to_reg->as_register()); break;
iveresov@2344 1013 case T_CHAR : __ lduh(base, disp, to_reg->as_register()); break;
iveresov@2344 1014 case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
iveresov@2344 1015 case T_INT : __ ld(base, disp, to_reg->as_register()); break;
iveresov@2344 1016 case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
duke@435 1017 case T_ARRAY : // fall through
iveresov@2344 1018 case T_OBJECT:
iveresov@2344 1019 {
iveresov@2344 1020 if (UseCompressedOops && !wide) {
iveresov@2344 1021 __ lduw(base, disp, to_reg->as_register());
iveresov@2344 1022 __ decode_heap_oop(to_reg->as_register());
iveresov@2344 1023 } else {
iveresov@2344 1024 __ ld_ptr(base, disp, to_reg->as_register());
iveresov@2344 1025 }
iveresov@2344 1026 break;
iveresov@2344 1027 }
duke@435 1028 case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
duke@435 1029 case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
duke@435 1030 case T_LONG :
duke@435 1031 #ifdef _LP64
duke@435 1032 __ ldx(base, disp, to_reg->as_register_lo());
duke@435 1033 #else
duke@435 1034 assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
duke@435 1035 "must be sequential");
duke@435 1036 __ ldd(base, disp, to_reg->as_register_hi());
duke@435 1037 #endif
duke@435 1038 break;
duke@435 1039 default : ShouldNotReachHere();
duke@435 1040 }
iveresov@2344 1041 if (type == T_ARRAY || type == T_OBJECT) {
iveresov@2344 1042 __ verify_oop(to_reg->as_register());
iveresov@2344 1043 }
duke@435 1044 return load_offset;
duke@435 1045 }
duke@435 1046
duke@435 1047 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
duke@435 1048 LIR_Const* c = src->as_constant_ptr();
duke@435 1049 switch (c->type()) {
duke@435 1050 case T_INT:
iveresov@2344 1051 case T_FLOAT: {
iveresov@2344 1052 Register src_reg = O7;
iveresov@2344 1053 int value = c->as_jint_bits();
iveresov@2344 1054 if (value == 0) {
iveresov@2344 1055 src_reg = G0;
iveresov@2344 1056 } else {
iveresov@2344 1057 __ set(value, O7);
iveresov@2344 1058 }
iveresov@2344 1059 Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
iveresov@2344 1060 __ stw(src_reg, addr.base(), addr.disp());
iveresov@2344 1061 break;
iveresov@2344 1062 }
roland@1732 1063 case T_ADDRESS: {
duke@435 1064 Register src_reg = O7;
duke@435 1065 int value = c->as_jint_bits();
duke@435 1066 if (value == 0) {
duke@435 1067 src_reg = G0;
duke@435 1068 } else {
duke@435 1069 __ set(value, O7);
duke@435 1070 }
duke@435 1071 Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
iveresov@2344 1072 __ st_ptr(src_reg, addr.base(), addr.disp());
duke@435 1073 break;
duke@435 1074 }
duke@435 1075 case T_OBJECT: {
duke@435 1076 Register src_reg = O7;
duke@435 1077 jobject2reg(c->as_jobject(), src_reg);
duke@435 1078 Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
duke@435 1079 __ st_ptr(src_reg, addr.base(), addr.disp());
duke@435 1080 break;
duke@435 1081 }
duke@435 1082 case T_LONG:
duke@435 1083 case T_DOUBLE: {
duke@435 1084 Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());
duke@435 1085
duke@435 1086 Register tmp = O7;
duke@435 1087 int value_lo = c->as_jint_lo_bits();
duke@435 1088 if (value_lo == 0) {
duke@435 1089 tmp = G0;
duke@435 1090 } else {
duke@435 1091 __ set(value_lo, O7);
duke@435 1092 }
duke@435 1093 __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes);
duke@435 1094 int value_hi = c->as_jint_hi_bits();
duke@435 1095 if (value_hi == 0) {
duke@435 1096 tmp = G0;
duke@435 1097 } else {
duke@435 1098 __ set(value_hi, O7);
duke@435 1099 }
duke@435 1100 __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes);
duke@435 1101 break;
duke@435 1102 }
duke@435 1103 default:
duke@435 1104 Unimplemented();
duke@435 1105 }
duke@435 1106 }
duke@435 1107
duke@435 1108
iveresov@2344 1109 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
duke@435 1110 LIR_Const* c = src->as_constant_ptr();
duke@435 1111 LIR_Address* addr = dest->as_address_ptr();
duke@435 1112 Register base = addr->base()->as_pointer_register();
iveresov@2344 1113 int offset = -1;
iveresov@2344 1114
duke@435 1115 switch (c->type()) {
duke@435 1116 case T_INT:
roland@1732 1117 case T_FLOAT:
roland@1732 1118 case T_ADDRESS: {
duke@435 1119 LIR_Opr tmp = FrameMap::O7_opr;
duke@435 1120 int value = c->as_jint_bits();
duke@435 1121 if (value == 0) {
duke@435 1122 tmp = FrameMap::G0_opr;
duke@435 1123 } else if (Assembler::is_simm13(value)) {
duke@435 1124 __ set(value, O7);
duke@435 1125 }
duke@435 1126 if (addr->index()->is_valid()) {
duke@435 1127 assert(addr->disp() == 0, "must be zero");
iveresov@2344 1128 offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
duke@435 1129 } else {
duke@435 1130 assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
iveresov@2344 1131 offset = store(tmp, base, addr->disp(), type, wide, false);
duke@435 1132 }
duke@435 1133 break;
duke@435 1134 }
duke@435 1135 case T_LONG:
duke@435 1136 case T_DOUBLE: {
duke@435 1137 assert(!addr->index()->is_valid(), "can't handle reg reg address here");
duke@435 1138 assert(Assembler::is_simm13(addr->disp()) &&
duke@435 1139 Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");
duke@435 1140
iveresov@2344 1141 LIR_Opr tmp = FrameMap::O7_opr;
duke@435 1142 int value_lo = c->as_jint_lo_bits();
duke@435 1143 if (value_lo == 0) {
iveresov@2344 1144 tmp = FrameMap::G0_opr;
duke@435 1145 } else {
duke@435 1146 __ set(value_lo, O7);
duke@435 1147 }
iveresov@2344 1148 offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false);
duke@435 1149 int value_hi = c->as_jint_hi_bits();
duke@435 1150 if (value_hi == 0) {
iveresov@2344 1151 tmp = FrameMap::G0_opr;
duke@435 1152 } else {
duke@435 1153 __ set(value_hi, O7);
duke@435 1154 }
never@3248 1155 store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false);
duke@435 1156 break;
duke@435 1157 }
duke@435 1158 case T_OBJECT: {
duke@435 1159 jobject obj = c->as_jobject();
duke@435 1160 LIR_Opr tmp;
duke@435 1161 if (obj == NULL) {
duke@435 1162 tmp = FrameMap::G0_opr;
duke@435 1163 } else {
duke@435 1164 tmp = FrameMap::O7_opr;
duke@435 1165 jobject2reg(c->as_jobject(), O7);
duke@435 1166 }
duke@435 1167 // handle either reg+reg or reg+disp address
duke@435 1168 if (addr->index()->is_valid()) {
duke@435 1169 assert(addr->disp() == 0, "must be zero");
iveresov@2344 1170 offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
duke@435 1171 } else {
duke@435 1172 assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
iveresov@2344 1173 offset = store(tmp, base, addr->disp(), type, wide, false);
duke@435 1174 }
duke@435 1175
duke@435 1176 break;
duke@435 1177 }
duke@435 1178 default:
duke@435 1179 Unimplemented();
duke@435 1180 }
iveresov@2344 1181 if (info != NULL) {
iveresov@2344 1182 assert(offset != -1, "offset should've been set");
iveresov@2344 1183 add_debug_info_for_null_check(offset, info);
iveresov@2344 1184 }
duke@435 1185 }
duke@435 1186
duke@435 1187
duke@435 1188 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
duke@435 1189 LIR_Const* c = src->as_constant_ptr();
duke@435 1190 LIR_Opr to_reg = dest;
duke@435 1191
duke@435 1192 switch (c->type()) {
duke@435 1193 case T_INT:
roland@1732 1194 case T_ADDRESS:
duke@435 1195 {
duke@435 1196 jint con = c->as_jint();
duke@435 1197 if (to_reg->is_single_cpu()) {
duke@435 1198 assert(patch_code == lir_patch_none, "no patching handled here");
duke@435 1199 __ set(con, to_reg->as_register());
duke@435 1200 } else {
duke@435 1201 ShouldNotReachHere();
duke@435 1202 assert(to_reg->is_single_fpu(), "wrong register kind");
duke@435 1203
duke@435 1204 __ set(con, O7);
twisti@1162 1205 Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
duke@435 1206 __ st(O7, temp_slot);
duke@435 1207 __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
duke@435 1208 }
duke@435 1209 }
duke@435 1210 break;
duke@435 1211
duke@435 1212 case T_LONG:
duke@435 1213 {
duke@435 1214 jlong con = c->as_jlong();
duke@435 1215
duke@435 1216 if (to_reg->is_double_cpu()) {
duke@435 1217 #ifdef _LP64
duke@435 1218 __ set(con, to_reg->as_register_lo());
duke@435 1219 #else
duke@435 1220 __ set(low(con), to_reg->as_register_lo());
duke@435 1221 __ set(high(con), to_reg->as_register_hi());
duke@435 1222 #endif
duke@435 1223 #ifdef _LP64
duke@435 1224 } else if (to_reg->is_single_cpu()) {
duke@435 1225 __ set(con, to_reg->as_register());
duke@435 1226 #endif
duke@435 1227 } else {
duke@435 1228 ShouldNotReachHere();
duke@435 1229 assert(to_reg->is_double_fpu(), "wrong register kind");
twisti@1162 1230 Address temp_slot_lo(SP, ((frame::register_save_words ) * wordSize) + STACK_BIAS);
twisti@1162 1231 Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
duke@435 1232 __ set(low(con), O7);
duke@435 1233 __ st(O7, temp_slot_lo);
duke@435 1234 __ set(high(con), O7);
duke@435 1235 __ st(O7, temp_slot_hi);
duke@435 1236 __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg());
duke@435 1237 }
duke@435 1238 }
duke@435 1239 break;
duke@435 1240
duke@435 1241 case T_OBJECT:
duke@435 1242 {
duke@435 1243 if (patch_code == lir_patch_none) {
duke@435 1244 jobject2reg(c->as_jobject(), to_reg->as_register());
duke@435 1245 } else {
duke@435 1246 jobject2reg_with_patching(to_reg->as_register(), info);
duke@435 1247 }
duke@435 1248 }
duke@435 1249 break;
duke@435 1250
coleenp@4037 1251 case T_METADATA:
coleenp@4037 1252 {
coleenp@4037 1253 if (patch_code == lir_patch_none) {
coleenp@4037 1254 metadata2reg(c->as_metadata(), to_reg->as_register());
coleenp@4037 1255 } else {
coleenp@4037 1256 klass2reg_with_patching(to_reg->as_register(), info);
coleenp@4037 1257 }
coleenp@4037 1258 }
coleenp@4037 1259 break;
coleenp@4037 1260
duke@435 1261 case T_FLOAT:
duke@435 1262 {
duke@435 1263 address const_addr = __ float_constant(c->as_jfloat());
duke@435 1264 if (const_addr == NULL) {
duke@435 1265 bailout("const section overflow");
duke@435 1266 break;
duke@435 1267 }
duke@435 1268 RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
twisti@1162 1269 AddressLiteral const_addrlit(const_addr, rspec);
duke@435 1270 if (to_reg->is_single_fpu()) {
twisti@1162 1271 __ patchable_sethi(const_addrlit, O7);
duke@435 1272 __ relocate(rspec);
twisti@1162 1273 __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());
duke@435 1274
duke@435 1275 } else {
duke@435 1276 assert(to_reg->is_single_cpu(), "Must be a cpu register.");
duke@435 1277
twisti@1162 1278 __ set(const_addrlit, O7);
iveresov@2344 1279 __ ld(O7, 0, to_reg->as_register());
duke@435 1280 }
duke@435 1281 }
duke@435 1282 break;
duke@435 1283
duke@435 1284 case T_DOUBLE:
duke@435 1285 {
duke@435 1286 address const_addr = __ double_constant(c->as_jdouble());
duke@435 1287 if (const_addr == NULL) {
duke@435 1288 bailout("const section overflow");
duke@435 1289 break;
duke@435 1290 }
duke@435 1291 RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
duke@435 1292
duke@435 1293 if (to_reg->is_double_fpu()) {
twisti@1162 1294 AddressLiteral const_addrlit(const_addr, rspec);
twisti@1162 1295 __ patchable_sethi(const_addrlit, O7);
duke@435 1296 __ relocate(rspec);
twisti@1162 1297 __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
duke@435 1298 } else {
duke@435 1299 assert(to_reg->is_double_cpu(), "Must be a long register.");
duke@435 1300 #ifdef _LP64
duke@435 1301 __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
duke@435 1302 #else
duke@435 1303 __ set(low(jlong_cast(c->as_jdouble())), to_reg->as_register_lo());
duke@435 1304 __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
duke@435 1305 #endif
duke@435 1306 }
duke@435 1307
duke@435 1308 }
duke@435 1309 break;
duke@435 1310
duke@435 1311 default:
duke@435 1312 ShouldNotReachHere();
duke@435 1313 }
duke@435 1314 }
duke@435 1315
duke@435 1316 Address LIR_Assembler::as_Address(LIR_Address* addr) {
duke@435 1317 Register reg = addr->base()->as_register();
twisti@1162 1318 return Address(reg, addr->disp());
duke@435 1319 }
duke@435 1320
duke@435 1321
duke@435 1322 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
duke@435 1323 switch (type) {
duke@435 1324 case T_INT:
duke@435 1325 case T_FLOAT: {
duke@435 1326 Register tmp = O7;
duke@435 1327 Address from = frame_map()->address_for_slot(src->single_stack_ix());
duke@435 1328 Address to = frame_map()->address_for_slot(dest->single_stack_ix());
duke@435 1329 __ lduw(from.base(), from.disp(), tmp);
duke@435 1330 __ stw(tmp, to.base(), to.disp());
duke@435 1331 break;
duke@435 1332 }
duke@435 1333 case T_OBJECT: {
duke@435 1334 Register tmp = O7;
duke@435 1335 Address from = frame_map()->address_for_slot(src->single_stack_ix());
duke@435 1336 Address to = frame_map()->address_for_slot(dest->single_stack_ix());
duke@435 1337 __ ld_ptr(from.base(), from.disp(), tmp);
duke@435 1338 __ st_ptr(tmp, to.base(), to.disp());
duke@435 1339 break;
duke@435 1340 }
duke@435 1341 case T_LONG:
duke@435 1342 case T_DOUBLE: {
duke@435 1343 Register tmp = O7;
duke@435 1344 Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
duke@435 1345 Address to = frame_map()->address_for_double_slot(dest->double_stack_ix());
duke@435 1346 __ lduw(from.base(), from.disp(), tmp);
duke@435 1347 __ stw(tmp, to.base(), to.disp());
duke@435 1348 __ lduw(from.base(), from.disp() + 4, tmp);
duke@435 1349 __ stw(tmp, to.base(), to.disp() + 4);
duke@435 1350 break;
duke@435 1351 }
duke@435 1352
duke@435 1353 default:
duke@435 1354 ShouldNotReachHere();
duke@435 1355 }
duke@435 1356 }
duke@435 1357
duke@435 1358
duke@435 1359 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
duke@435 1360 Address base = as_Address(addr);
twisti@1162 1361 return Address(base.base(), base.disp() + hi_word_offset_in_bytes);
duke@435 1362 }
duke@435 1363
duke@435 1364
duke@435 1365 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
duke@435 1366 Address base = as_Address(addr);
twisti@1162 1367 return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
duke@435 1368 }
duke@435 1369
duke@435 1370
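// mem2reg loads from a LIR address into a register. The address is reduced
// to base+simm13 or base+index: a displacement that does not fit in 13 bits
// is materialized in O7, and a patchable set is emitted instead when the
// field offset is not yet known. The code offset of the load is recorded so
// the implicit null-check debug info points at the faulting instruction.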
duke@435 1371 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
iveresov@2344 1372 LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {
duke@435 1373
roland@4051 1374 assert(type != T_METADATA, "load of metadata ptr not supported");
duke@435 1375 LIR_Address* addr = src_opr->as_address_ptr();
duke@435 1376 LIR_Opr to_reg = dest;
duke@435 1377
duke@435 1378 Register src = addr->base()->as_pointer_register();
duke@435 1379 Register disp_reg = noreg;
duke@435 1380 int disp_value = addr->disp();
duke@435 1381 bool needs_patching = (patch_code != lir_patch_none);
duke@435 1382
duke@435 1383 if (addr->base()->type() == T_OBJECT) {
duke@435 1384 __ verify_oop(src);
duke@435 1385 }
duke@435 1386
duke@435 1387 PatchingStub* patch = NULL;
duke@435 1388 if (needs_patching) {
duke@435 1389 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
duke@435 1390 assert(!to_reg->is_double_cpu() ||
duke@435 1391 patch_code == lir_patch_none ||
duke@435 1392 patch_code == lir_patch_normal, "patching doesn't match register");
duke@435 1393 }
duke@435 1394
duke@435 1395 if (addr->index()->is_illegal()) {
duke@435 1396 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
duke@435 1397 if (needs_patching) {
twisti@1162 1398 __ patchable_set(0, O7);
duke@435 1399 } else {
duke@435 1400 __ set(disp_value, O7);
duke@435 1401 }
duke@435 1402 disp_reg = O7;
duke@435 1403 }
duke@435 1404 } else if (unaligned || PatchALot) {
duke@435 1405 __ add(src, addr->index()->as_register(), O7);
duke@435 1406 src = O7;
duke@435 1407 } else {
duke@435 1408 disp_reg = addr->index()->as_pointer_register();
duke@435 1409 assert(disp_value == 0, "can't handle 3 operand addresses");
duke@435 1410 }
duke@435 1411
duke@435 1412 // remember the offset of the load. The patching_epilog must be done
duke@435 1413 // before the call to add_debug_info, otherwise the PcDescs don't get
duke@435 1414 // entered in increasing order.
duke@435 1415 int offset = code_offset();
duke@435 1416
duke@435 1417 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
duke@435 1418 if (disp_reg == noreg) {
iveresov@2344 1419 offset = load(src, disp_value, to_reg, type, wide, unaligned);
duke@435 1420 } else {
duke@435 1421 assert(!unaligned, "can't handle this");
iveresov@2344 1422 offset = load(src, disp_reg, to_reg, type, wide);
duke@435 1423 }
duke@435 1424
duke@435 1425 if (patch != NULL) {
duke@435 1426 patching_epilog(patch, patch_code, src, info);
duke@435 1427 }
duke@435 1428 if (info != NULL) add_debug_info_for_null_check(offset, info);
duke@435 1429 }
duke@435 1430
duke@435 1431
duke@435 1432 void LIR_Assembler::prefetchr(LIR_Opr src) {
duke@435 1433 LIR_Address* addr = src->as_address_ptr();
duke@435 1434 Address from_addr = as_Address(addr);
duke@435 1435
duke@435 1436 if (VM_Version::has_v9()) {
duke@435 1437 __ prefetch(from_addr, Assembler::severalReads);
duke@435 1438 }
duke@435 1439 }
duke@435 1440
duke@435 1441
duke@435 1442 void LIR_Assembler::prefetchw(LIR_Opr src) {
duke@435 1443 LIR_Address* addr = src->as_address_ptr();
duke@435 1444 Address from_addr = as_Address(addr);
duke@435 1445
duke@435 1446 if (VM_Version::has_v9()) {
duke@435 1447 __ prefetch(from_addr, Assembler::severalWritesAndPossiblyReads);
duke@435 1448 }
duke@435 1449 }
duke@435 1450
duke@435 1451
duke@435 1452 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
duke@435 1453 Address addr;
duke@435 1454 if (src->is_single_word()) {
duke@435 1455 addr = frame_map()->address_for_slot(src->single_stack_ix());
duke@435 1456 } else if (src->is_double_word()) {
duke@435 1457 addr = frame_map()->address_for_double_slot(src->double_stack_ix());
duke@435 1458 }
duke@435 1459
duke@435 1460 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
iveresov@2344 1461 load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
duke@435 1462 }
duke@435 1463
duke@435 1464
duke@435 1465 void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
duke@435 1466 Address addr;
duke@435 1467 if (dest->is_single_word()) {
duke@435 1468 addr = frame_map()->address_for_slot(dest->single_stack_ix());
duke@435 1469 } else if (dest->is_double_word()) {
duke@435 1470 addr = frame_map()->address_for_slot(dest->double_stack_ix());
duke@435 1471 }
duke@435 1472 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
iveresov@2344 1473 store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
duke@435 1474 }
duke@435 1475
duke@435 1476
duke@435 1477 void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
duke@435 1478 if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
duke@435 1479 if (from_reg->is_double_fpu()) {
duke@435 1480 // double to double moves
duke@435 1481 assert(to_reg->is_double_fpu(), "should match");
duke@435 1482 __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg());
duke@435 1483 } else {
duke@435 1484 // float to float moves
duke@435 1485 assert(to_reg->is_single_fpu(), "should match");
duke@435 1486 __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg());
duke@435 1487 }
duke@435 1488 } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
duke@435 1489 if (from_reg->is_double_cpu()) {
duke@435 1490 #ifdef _LP64
duke@435 1491 __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
duke@435 1492 #else
duke@435 1493 assert(to_reg->is_double_cpu() &&
duke@435 1494 from_reg->as_register_hi() != to_reg->as_register_lo() &&
duke@435 1495 from_reg->as_register_lo() != to_reg->as_register_hi(),
duke@435 1496 "should both be long and not overlap");
duke@435 1497 // long to long moves
duke@435 1498 __ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
duke@435 1499 __ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
duke@435 1500 #endif
duke@435 1501 #ifdef _LP64
duke@435 1502 } else if (to_reg->is_double_cpu()) {
duke@435 1503 // int to int moves
duke@435 1504 __ mov(from_reg->as_register(), to_reg->as_register_lo());
duke@435 1505 #endif
duke@435 1506 } else {
duke@435 1507 // int to int moves
duke@435 1508 __ mov(from_reg->as_register(), to_reg->as_register());
duke@435 1509 }
duke@435 1510 } else {
duke@435 1511 ShouldNotReachHere();
duke@435 1512 }
duke@435 1513 if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
duke@435 1514 __ verify_oop(to_reg->as_register());
duke@435 1515 }
duke@435 1516 }
duke@435 1517
duke@435 1518
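// reg2mem is the store counterpart of mem2reg: the same addressing and
// patching logic applies, and the recorded store offset feeds the implicit
// null-check debug information.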
duke@435 1519 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
duke@435 1520 LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
iveresov@2344 1521 bool wide, bool unaligned) {
roland@4051 1522 assert(type != T_METADATA, "store of metadata ptr not supported");
duke@435 1523 LIR_Address* addr = dest->as_address_ptr();
duke@435 1524
duke@435 1525 Register src = addr->base()->as_pointer_register();
duke@435 1526 Register disp_reg = noreg;
duke@435 1527 int disp_value = addr->disp();
duke@435 1528 bool needs_patching = (patch_code != lir_patch_none);
duke@435 1529
duke@435 1530 if (addr->base()->is_oop_register()) {
duke@435 1531 __ verify_oop(src);
duke@435 1532 }
duke@435 1533
duke@435 1534 PatchingStub* patch = NULL;
duke@435 1535 if (needs_patching) {
duke@435 1536 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
duke@435 1537 assert(!from_reg->is_double_cpu() ||
duke@435 1538 patch_code == lir_patch_none ||
duke@435 1539 patch_code == lir_patch_normal, "patching doesn't match register");
duke@435 1540 }
duke@435 1541
duke@435 1542 if (addr->index()->is_illegal()) {
duke@435 1543 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
duke@435 1544 if (needs_patching) {
twisti@1162 1545 __ patchable_set(0, O7);
duke@435 1546 } else {
duke@435 1547 __ set(disp_value, O7);
duke@435 1548 }
duke@435 1549 disp_reg = O7;
duke@435 1550 }
duke@435 1551 } else if (unaligned || PatchALot) {
duke@435 1552 __ add(src, addr->index()->as_register(), O7);
duke@435 1553 src = O7;
duke@435 1554 } else {
duke@435 1555 disp_reg = addr->index()->as_pointer_register();
duke@435 1556 assert(disp_value == 0, "can't handle 3 operand addresses");
duke@435 1557 }
duke@435 1558
duke@435 1559 // remember the offset of the store. The patching_epilog must be done
duke@435 1560 // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
duke@435 1561 // entered in increasing order.
duke@435 1562 int offset;
duke@435 1563
duke@435 1564 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
duke@435 1565 if (disp_reg == noreg) {
iveresov@2344 1566 offset = store(from_reg, src, disp_value, type, wide, unaligned);
duke@435 1567 } else {
duke@435 1568 assert(!unaligned, "can't handle this");
iveresov@2344 1569 offset = store(from_reg, src, disp_reg, type, wide);
duke@435 1570 }
duke@435 1571
duke@435 1572 if (patch != NULL) {
duke@435 1573 patching_epilog(patch, patch_code, src, info);
duke@435 1574 }
duke@435 1575
duke@435 1576 if (info != NULL) add_debug_info_for_null_check(offset, info);
duke@435 1577 }
duke@435 1578
duke@435 1579
duke@435 1580 void LIR_Assembler::return_op(LIR_Opr result) {
duke@435 1581 // the poll may need a register so just pick one that isn't the return register
iveresov@2138 1582 #if defined(TIERED) && !defined(_LP64)
duke@435 1583 if (result->type_field() == LIR_OprDesc::long_type) {
duke@435 1584 // Must move the result to G1
duke@435 1585 // Must leave proper result in O0,O1 and G1 (TIERED only)
duke@435 1586 __ sllx(I0, 32, G1); // Shift bits into high G1
duke@435 1587 __ srl (I1, 0, I1); // Zero extend O1 (harmless?)
duke@435 1588 __ or3 (I1, G1, G1); // OR 64 bits into G1
iveresov@2138 1589 #ifdef ASSERT
iveresov@2138 1590 // mangle it so any problems will show up
iveresov@2138 1591 __ set(0xdeadbeef, I0);
iveresov@2138 1592 __ set(0xdeadbeef, I1);
iveresov@2138 1593 #endif
duke@435 1594 }
duke@435 1595 #endif // TIERED && !_LP64
duke@435 1596 __ set((intptr_t)os::get_polling_page(), L0);
duke@435 1597 __ relocate(relocInfo::poll_return_type);
duke@435 1598 __ ld_ptr(L0, 0, G0);
duke@435 1599 __ ret();
duke@435 1600 __ delayed()->restore();
duke@435 1601 }
duke@435 1602
duke@435 1603
duke@435 1604 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
duke@435 1605 __ set((intptr_t)os::get_polling_page(), tmp->as_register());
duke@435 1606 if (info != NULL) {
duke@435 1607 add_debug_info_for_branch(info);
duke@435 1608 } else {
duke@435 1609 __ relocate(relocInfo::poll_type);
duke@435 1610 }
duke@435 1611
duke@435 1612 int offset = __ offset();
duke@435 1613 __ ld_ptr(tmp->as_register(), 0, G0);
duke@435 1614
duke@435 1615 return offset;
duke@435 1616 }
duke@435 1617
duke@435 1618
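// The static call stub consists of a metadata placeholder loaded into G5
// followed by a jump whose destination is emitted as -1; both are patched
// once the call site is resolved to the actual callee and entry point.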
duke@435 1619 void LIR_Assembler::emit_static_call_stub() {
duke@435 1620 address call_pc = __ pc();
duke@435 1621 address stub = __ start_a_stub(call_stub_size);
duke@435 1622 if (stub == NULL) {
duke@435 1623 bailout("static call stub overflow");
duke@435 1624 return;
duke@435 1625 }
duke@435 1626
duke@435 1627 int start = __ offset();
duke@435 1628 __ relocate(static_stub_Relocation::spec(call_pc));
duke@435 1629
coleenp@4037 1630 __ set_metadata(NULL, G5);
duke@435 1631 // must be set to -1 at code generation time
twisti@1162 1632 AddressLiteral addrlit(-1);
twisti@1162 1633 __ jump_to(addrlit, G3);
duke@435 1634 __ delayed()->nop();
duke@435 1635
duke@435 1636 assert(__ offset() - start <= call_stub_size, "stub too big");
duke@435 1637 __ end_a_stub();
duke@435 1638 }
duke@435 1639
duke@435 1640
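// comp_op emits the compare whose result a following branch or cmove
// consumes: fcmp for FPU operands, cmp against a register or a (possibly
// materialized) constant for single-word values, and orcc/subcc sequences
// for long values on 32-bit.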
duke@435 1641 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
duke@435 1642 if (opr1->is_single_fpu()) {
duke@435 1643 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg());
duke@435 1644 } else if (opr1->is_double_fpu()) {
duke@435 1645 __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg());
duke@435 1646 } else if (opr1->is_single_cpu()) {
duke@435 1647 if (opr2->is_constant()) {
duke@435 1648 switch (opr2->as_constant_ptr()->type()) {
duke@435 1649 case T_INT:
duke@435 1650 { jint con = opr2->as_constant_ptr()->as_jint();
duke@435 1651 if (Assembler::is_simm13(con)) {
duke@435 1652 __ cmp(opr1->as_register(), con);
duke@435 1653 } else {
duke@435 1654 __ set(con, O7);
duke@435 1655 __ cmp(opr1->as_register(), O7);
duke@435 1656 }
duke@435 1657 }
duke@435 1658 break;
duke@435 1659
duke@435 1660 case T_OBJECT:
duke@435 1661         // there are only equal/notequal comparisons on objects
duke@435 1662 { jobject con = opr2->as_constant_ptr()->as_jobject();
duke@435 1663 if (con == NULL) {
duke@435 1664 __ cmp(opr1->as_register(), 0);
duke@435 1665 } else {
duke@435 1666 jobject2reg(con, O7);
duke@435 1667 __ cmp(opr1->as_register(), O7);
duke@435 1668 }
duke@435 1669 }
duke@435 1670 break;
duke@435 1671
duke@435 1672 default:
duke@435 1673 ShouldNotReachHere();
duke@435 1674 break;
duke@435 1675 }
duke@435 1676 } else {
duke@435 1677 if (opr2->is_address()) {
duke@435 1678 LIR_Address * addr = opr2->as_address_ptr();
duke@435 1679 BasicType type = addr->type();
duke@435 1680 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
duke@435 1681 else __ ld(as_Address(addr), O7);
duke@435 1682 __ cmp(opr1->as_register(), O7);
duke@435 1683 } else {
duke@435 1684 __ cmp(opr1->as_register(), opr2->as_register());
duke@435 1685 }
duke@435 1686 }
duke@435 1687 } else if (opr1->is_double_cpu()) {
duke@435 1688 Register xlo = opr1->as_register_lo();
duke@435 1689 Register xhi = opr1->as_register_hi();
duke@435 1690 if (opr2->is_constant() && opr2->as_jlong() == 0) {
duke@435 1691 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
duke@435 1692 #ifdef _LP64
duke@435 1693 __ orcc(xhi, G0, G0);
duke@435 1694 #else
duke@435 1695 __ orcc(xhi, xlo, G0);
duke@435 1696 #endif
duke@435 1697 } else if (opr2->is_register()) {
duke@435 1698 Register ylo = opr2->as_register_lo();
duke@435 1699 Register yhi = opr2->as_register_hi();
duke@435 1700 #ifdef _LP64
duke@435 1701 __ cmp(xlo, ylo);
duke@435 1702 #else
duke@435 1703 __ subcc(xlo, ylo, xlo);
duke@435 1704 __ subccc(xhi, yhi, xhi);
duke@435 1705 if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
duke@435 1706 __ orcc(xhi, xlo, G0);
duke@435 1707 }
duke@435 1708 #endif
duke@435 1709 } else {
duke@435 1710 ShouldNotReachHere();
duke@435 1711 }
duke@435 1712 } else if (opr1->is_address()) {
duke@435 1713 LIR_Address * addr = opr1->as_address_ptr();
duke@435 1714 BasicType type = addr->type();
duke@435 1715 assert (opr2->is_constant(), "Checking");
duke@435 1716 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
duke@435 1717 else __ ld(as_Address(addr), O7);
duke@435 1718 __ cmp(O7, opr2->as_constant_ptr()->as_jint());
duke@435 1719 } else {
duke@435 1720 ShouldNotReachHere();
duke@435 1721 }
duke@435 1722 }
duke@435 1723
duke@435 1724
duke@435 1725 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
duke@435 1726 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
duke@435 1727 bool is_unordered_less = (code == lir_ucmp_fd2i);
duke@435 1728 if (left->is_single_fpu()) {
duke@435 1729 __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
duke@435 1730 } else if (left->is_double_fpu()) {
duke@435 1731 __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
duke@435 1732 } else {
duke@435 1733 ShouldNotReachHere();
duke@435 1734 }
duke@435 1735 } else if (code == lir_cmp_l2i) {
iveresov@1804 1736 #ifdef _LP64
iveresov@1804 1737 __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
iveresov@1804 1738 #else
duke@435 1739 __ lcmp(left->as_register_hi(), left->as_register_lo(),
duke@435 1740 right->as_register_hi(), right->as_register_lo(),
duke@435 1741 dst->as_register());
iveresov@1804 1742 #endif
duke@435 1743 } else {
duke@435 1744 ShouldNotReachHere();
duke@435 1745 }
duke@435 1746 }
duke@435 1747
duke@435 1748
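// cmove is implemented as a conditional branch over a move: opr1 is moved
// into the result first, the branch (taken when the condition holds) skips
// the move of opr2, and the delay slot is used to finish an int-constant
// move of opr1 when possible.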
iveresov@2412 1749 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
duke@435 1750 Assembler::Condition acond;
duke@435 1751 switch (condition) {
duke@435 1752 case lir_cond_equal: acond = Assembler::equal; break;
duke@435 1753 case lir_cond_notEqual: acond = Assembler::notEqual; break;
duke@435 1754 case lir_cond_less: acond = Assembler::less; break;
duke@435 1755 case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
duke@435 1756 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
duke@435 1757 case lir_cond_greater: acond = Assembler::greater; break;
duke@435 1758 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break;
duke@435 1759 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break;
duke@435 1760 default: ShouldNotReachHere();
duke@435 1761 };
duke@435 1762
duke@435 1763 if (opr1->is_constant() && opr1->type() == T_INT) {
duke@435 1764 Register dest = result->as_register();
duke@435 1765 // load up first part of constant before branch
duke@435 1766 // and do the rest in the delay slot.
duke@435 1767 if (!Assembler::is_simm13(opr1->as_jint())) {
duke@435 1768 __ sethi(opr1->as_jint(), dest);
duke@435 1769 }
duke@435 1770 } else if (opr1->is_constant()) {
duke@435 1771 const2reg(opr1, result, lir_patch_none, NULL);
duke@435 1772 } else if (opr1->is_register()) {
duke@435 1773 reg2reg(opr1, result);
duke@435 1774 } else if (opr1->is_stack()) {
duke@435 1775 stack2reg(opr1, result, result->type());
duke@435 1776 } else {
duke@435 1777 ShouldNotReachHere();
duke@435 1778 }
duke@435 1779 Label skip;
iveresov@2412 1780 #ifdef _LP64
iveresov@2412 1781 if (type == T_INT) {
iveresov@2412 1782 __ br(acond, false, Assembler::pt, skip);
iveresov@2412 1783 } else
iveresov@2412 1784 #endif
iveresov@2412 1785 __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
duke@435 1786 if (opr1->is_constant() && opr1->type() == T_INT) {
duke@435 1787 Register dest = result->as_register();
duke@435 1788 if (Assembler::is_simm13(opr1->as_jint())) {
duke@435 1789 __ delayed()->or3(G0, opr1->as_jint(), dest);
duke@435 1790 } else {
duke@435 1791 // the sethi has been done above, so just put in the low 10 bits
duke@435 1792 __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest);
duke@435 1793 }
duke@435 1794 } else {
duke@435 1795 // can't do anything useful in the delay slot
duke@435 1796 __ delayed()->nop();
duke@435 1797 }
duke@435 1798 if (opr2->is_constant()) {
duke@435 1799 const2reg(opr2, result, lir_patch_none, NULL);
duke@435 1800 } else if (opr2->is_register()) {
duke@435 1801 reg2reg(opr2, result);
duke@435 1802 } else if (opr2->is_stack()) {
duke@435 1803 stack2reg(opr2, result, result->type());
duke@435 1804 } else {
duke@435 1805 ShouldNotReachHere();
duke@435 1806 }
duke@435 1807 __ bind(skip);
duke@435 1808 }
duke@435 1809
duke@435 1810
duke@435 1811 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
duke@435 1812 assert(info == NULL, "unused on this code path");
duke@435 1813 assert(left->is_register(), "wrong items state");
duke@435 1814 assert(dest->is_register(), "wrong items state");
duke@435 1815
duke@435 1816 if (right->is_register()) {
duke@435 1817 if (dest->is_float_kind()) {
duke@435 1818
duke@435 1819 FloatRegister lreg, rreg, res;
duke@435 1820 FloatRegisterImpl::Width w;
duke@435 1821 if (right->is_single_fpu()) {
duke@435 1822 w = FloatRegisterImpl::S;
duke@435 1823 lreg = left->as_float_reg();
duke@435 1824 rreg = right->as_float_reg();
duke@435 1825 res = dest->as_float_reg();
duke@435 1826 } else {
duke@435 1827 w = FloatRegisterImpl::D;
duke@435 1828 lreg = left->as_double_reg();
duke@435 1829 rreg = right->as_double_reg();
duke@435 1830 res = dest->as_double_reg();
duke@435 1831 }
duke@435 1832
duke@435 1833 switch (code) {
duke@435 1834 case lir_add: __ fadd(w, lreg, rreg, res); break;
duke@435 1835 case lir_sub: __ fsub(w, lreg, rreg, res); break;
duke@435 1836 case lir_mul: // fall through
duke@435 1837 case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break;
duke@435 1838 case lir_div: // fall through
duke@435 1839 case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break;
duke@435 1840 default: ShouldNotReachHere();
duke@435 1841 }
duke@435 1842
duke@435 1843 } else if (dest->is_double_cpu()) {
duke@435 1844 #ifdef _LP64
duke@435 1845 Register dst_lo = dest->as_register_lo();
duke@435 1846 Register op1_lo = left->as_pointer_register();
duke@435 1847 Register op2_lo = right->as_pointer_register();
duke@435 1848
duke@435 1849 switch (code) {
duke@435 1850 case lir_add:
duke@435 1851 __ add(op1_lo, op2_lo, dst_lo);
duke@435 1852 break;
duke@435 1853
duke@435 1854 case lir_sub:
duke@435 1855 __ sub(op1_lo, op2_lo, dst_lo);
duke@435 1856 break;
duke@435 1857
duke@435 1858 default: ShouldNotReachHere();
duke@435 1859 }
duke@435 1860 #else
duke@435 1861 Register op1_lo = left->as_register_lo();
duke@435 1862 Register op1_hi = left->as_register_hi();
duke@435 1863 Register op2_lo = right->as_register_lo();
duke@435 1864 Register op2_hi = right->as_register_hi();
duke@435 1865 Register dst_lo = dest->as_register_lo();
duke@435 1866 Register dst_hi = dest->as_register_hi();
duke@435 1867
duke@435 1868 switch (code) {
duke@435 1869 case lir_add:
duke@435 1870 __ addcc(op1_lo, op2_lo, dst_lo);
duke@435 1871 __ addc (op1_hi, op2_hi, dst_hi);
duke@435 1872 break;
duke@435 1873
duke@435 1874 case lir_sub:
duke@435 1875 __ subcc(op1_lo, op2_lo, dst_lo);
duke@435 1876 __ subc (op1_hi, op2_hi, dst_hi);
duke@435 1877 break;
duke@435 1878
duke@435 1879 default: ShouldNotReachHere();
duke@435 1880 }
duke@435 1881 #endif
duke@435 1882 } else {
duke@435 1883 assert (right->is_single_cpu(), "Just Checking");
duke@435 1884
duke@435 1885 Register lreg = left->as_register();
duke@435 1886 Register res = dest->as_register();
duke@435 1887 Register rreg = right->as_register();
duke@435 1888 switch (code) {
duke@435 1889 case lir_add: __ add (lreg, rreg, res); break;
duke@435 1890 case lir_sub: __ sub (lreg, rreg, res); break;
duke@435 1891 case lir_mul: __ mult (lreg, rreg, res); break;
duke@435 1892 default: ShouldNotReachHere();
duke@435 1893 }
duke@435 1894 }
duke@435 1895 } else {
duke@435 1896 assert (right->is_constant(), "must be constant");
duke@435 1897
duke@435 1898 if (dest->is_single_cpu()) {
duke@435 1899 Register lreg = left->as_register();
duke@435 1900 Register res = dest->as_register();
duke@435 1901 int simm13 = right->as_constant_ptr()->as_jint();
duke@435 1902
duke@435 1903 switch (code) {
duke@435 1904 case lir_add: __ add (lreg, simm13, res); break;
duke@435 1905 case lir_sub: __ sub (lreg, simm13, res); break;
duke@435 1906 case lir_mul: __ mult (lreg, simm13, res); break;
duke@435 1907 default: ShouldNotReachHere();
duke@435 1908 }
duke@435 1909 } else {
duke@435 1910 Register lreg = left->as_pointer_register();
duke@435 1911 Register res = dest->as_register_lo();
duke@435 1912 long con = right->as_constant_ptr()->as_jlong();
duke@435 1913 assert(Assembler::is_simm13(con), "must be simm13");
duke@435 1914
duke@435 1915 switch (code) {
duke@435 1916 case lir_add: __ add (lreg, (int)con, res); break;
duke@435 1917 case lir_sub: __ sub (lreg, (int)con, res); break;
duke@435 1918 case lir_mul: __ mult (lreg, (int)con, res); break;
duke@435 1919 default: ShouldNotReachHere();
duke@435 1920 }
duke@435 1921 }
duke@435 1922 }
duke@435 1923 }
duke@435 1924
duke@435 1925
duke@435 1926 void LIR_Assembler::fpop() {
duke@435 1927 // do nothing
duke@435 1928 }
duke@435 1929
duke@435 1930
duke@435 1931 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
duke@435 1932 switch (code) {
duke@435 1933 case lir_sin:
duke@435 1934 case lir_tan:
duke@435 1935 case lir_cos: {
duke@435 1936 assert(thread->is_valid(), "preserve the thread object for performance reasons");
duke@435 1937 assert(dest->as_double_reg() == F0, "the result will be in f0/f1");
duke@435 1938 break;
duke@435 1939 }
duke@435 1940 case lir_sqrt: {
duke@435 1941 assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
duke@435 1942 FloatRegister src_reg = value->as_double_reg();
duke@435 1943 FloatRegister dst_reg = dest->as_double_reg();
duke@435 1944 __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg);
duke@435 1945 break;
duke@435 1946 }
duke@435 1947 case lir_abs: {
duke@435 1948 assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
duke@435 1949 FloatRegister src_reg = value->as_double_reg();
duke@435 1950 FloatRegister dst_reg = dest->as_double_reg();
duke@435 1951 __ fabs(FloatRegisterImpl::D, src_reg, dst_reg);
duke@435 1952 break;
duke@435 1953 }
duke@435 1954 default: {
duke@435 1955 ShouldNotReachHere();
duke@435 1956 break;
duke@435 1957 }
duke@435 1958 }
duke@435 1959 }
duke@435 1960
duke@435 1961
duke@435 1962 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
duke@435 1963 if (right->is_constant()) {
duke@435 1964 if (dest->is_single_cpu()) {
duke@435 1965 int simm13 = right->as_constant_ptr()->as_jint();
duke@435 1966 switch (code) {
duke@435 1967 case lir_logic_and: __ and3 (left->as_register(), simm13, dest->as_register()); break;
duke@435 1968 case lir_logic_or: __ or3 (left->as_register(), simm13, dest->as_register()); break;
duke@435 1969 case lir_logic_xor: __ xor3 (left->as_register(), simm13, dest->as_register()); break;
duke@435 1970 default: ShouldNotReachHere();
duke@435 1971 }
duke@435 1972 } else {
duke@435 1973 long c = right->as_constant_ptr()->as_jlong();
duke@435 1974 assert(c == (int)c && Assembler::is_simm13(c), "out of range");
duke@435 1975 int simm13 = (int)c;
duke@435 1976 switch (code) {
duke@435 1977 case lir_logic_and:
duke@435 1978 #ifndef _LP64
duke@435 1979 __ and3 (left->as_register_hi(), 0, dest->as_register_hi());
duke@435 1980 #endif
duke@435 1981 __ and3 (left->as_register_lo(), simm13, dest->as_register_lo());
duke@435 1982 break;
duke@435 1983
duke@435 1984 case lir_logic_or:
duke@435 1985 #ifndef _LP64
duke@435 1986 __ or3 (left->as_register_hi(), 0, dest->as_register_hi());
duke@435 1987 #endif
duke@435 1988 __ or3 (left->as_register_lo(), simm13, dest->as_register_lo());
duke@435 1989 break;
duke@435 1990
duke@435 1991 case lir_logic_xor:
duke@435 1992 #ifndef _LP64
duke@435 1993 __ xor3 (left->as_register_hi(), 0, dest->as_register_hi());
duke@435 1994 #endif
duke@435 1995 __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo());
duke@435 1996 break;
duke@435 1997
duke@435 1998 default: ShouldNotReachHere();
duke@435 1999 }
duke@435 2000 }
duke@435 2001 } else {
duke@435 2002 assert(right->is_register(), "right should be in register");
duke@435 2003
duke@435 2004 if (dest->is_single_cpu()) {
duke@435 2005 switch (code) {
duke@435 2006 case lir_logic_and: __ and3 (left->as_register(), right->as_register(), dest->as_register()); break;
duke@435 2007 case lir_logic_or: __ or3 (left->as_register(), right->as_register(), dest->as_register()); break;
duke@435 2008 case lir_logic_xor: __ xor3 (left->as_register(), right->as_register(), dest->as_register()); break;
duke@435 2009 default: ShouldNotReachHere();
duke@435 2010 }
duke@435 2011 } else {
duke@435 2012 #ifdef _LP64
duke@435 2013 Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
duke@435 2014 left->as_register_lo();
duke@435 2015 Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
duke@435 2016 right->as_register_lo();
duke@435 2017
duke@435 2018 switch (code) {
duke@435 2019 case lir_logic_and: __ and3 (l, r, dest->as_register_lo()); break;
duke@435 2020 case lir_logic_or: __ or3 (l, r, dest->as_register_lo()); break;
duke@435 2021 case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break;
duke@435 2022 default: ShouldNotReachHere();
duke@435 2023 }
duke@435 2024 #else
duke@435 2025 switch (code) {
duke@435 2026 case lir_logic_and:
duke@435 2027 __ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
duke@435 2028 __ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
duke@435 2029 break;
duke@435 2030
duke@435 2031 case lir_logic_or:
duke@435 2032 __ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
duke@435 2033 __ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
duke@435 2034 break;
duke@435 2035
duke@435 2036 case lir_logic_xor:
duke@435 2037 __ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
duke@435 2038 __ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
duke@435 2039 break;
duke@435 2040
duke@435 2041 default: ShouldNotReachHere();
duke@435 2042 }
duke@435 2043 #endif
duke@435 2044 }
duke@435 2045 }
duke@435 2046 }
duke@435 2047
duke@435 2048
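// shift_amount returns log2 of the element size for the given basic type,
// i.e. the scale used when turning an array index into a byte offset.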
duke@435 2049 int LIR_Assembler::shift_amount(BasicType t) {
kvn@464 2050 int elem_size = type2aelembytes(t);
duke@435 2051 switch (elem_size) {
duke@435 2052 case 1 : return 0;
duke@435 2053 case 2 : return 1;
duke@435 2054 case 4 : return 2;
duke@435 2055 case 8 : return 3;
duke@435 2056 }
duke@435 2057 ShouldNotReachHere();
duke@435 2058 return -1;
duke@435 2059 }
duke@435 2060
duke@435 2061
never@1813 2062 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
duke@435 2063 assert(exceptionOop->as_register() == Oexception, "should match");
never@1813 2064 assert(exceptionPC->as_register() == Oissuing_pc, "should match");
duke@435 2065
duke@435 2066 info->add_register_oop(exceptionOop);
duke@435 2067
never@1813 2068 // reuse the debug info from the safepoint poll for the throw op itself
never@1813 2069 address pc_for_athrow = __ pc();
never@1813 2070 int pc_for_athrow_offset = __ offset();
never@1813 2071 RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
never@1813 2072 __ set(pc_for_athrow, Oissuing_pc, rspec);
never@1813 2073 add_call_info(pc_for_athrow_offset, info); // for exception handler
never@1813 2074
never@1813 2075 __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
never@1813 2076 __ delayed()->nop();
never@1813 2077 }
never@1813 2078
never@1813 2079
never@1813 2080 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
never@1813 2081 assert(exceptionOop->as_register() == Oexception, "should match");
never@1813 2082
never@1813 2083 __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry);
never@1813 2084 __ delayed()->nop();
duke@435 2085 }
duke@435 2086
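// emit_arraycopy: when no static type information is available, the generic
// arraycopy stub (or the C fallback in Runtime1) is called directly.
// Otherwise the null, position, length and range checks requested by the
// flags are emitted inline, with failures branching to the ArrayCopyStub
// slow path; a type check routes incompatible object arrays through a
// per-element checkcast copy, and the final copy is done by the stub
// selected for the element type, alignment and overlap.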
duke@435 2087 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
duke@435 2088 Register src = op->src()->as_register();
duke@435 2089 Register dst = op->dst()->as_register();
duke@435 2090 Register src_pos = op->src_pos()->as_register();
duke@435 2091 Register dst_pos = op->dst_pos()->as_register();
duke@435 2092 Register length = op->length()->as_register();
duke@435 2093 Register tmp = op->tmp()->as_register();
duke@435 2094 Register tmp2 = O7;
duke@435 2095
duke@435 2096 int flags = op->flags();
duke@435 2097 ciArrayKlass* default_type = op->expected_type();
duke@435 2098 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
duke@435 2099 if (basic_type == T_ARRAY) basic_type = T_OBJECT;
duke@435 2100
iveresov@2731 2101 #ifdef _LP64
iveresov@2731 2102   // the upper 32 bits must be zero
iveresov@2731 2103 __ sra(dst_pos, 0, dst_pos);
iveresov@2731 2104 __ sra(src_pos, 0, src_pos);
iveresov@2731 2105 __ sra(length, 0, length);
iveresov@2731 2106 #endif
iveresov@2731 2107
duke@435 2108 // set up the arraycopy stub information
duke@435 2109 ArrayCopyStub* stub = op->stub();
duke@435 2110
duke@435 2111   // Always call the stub if no type information is available.  It's OK if
duke@435 2112   // the known type isn't loaded, since the code sanity-checks in debug
duke@435 2113   // mode and the type isn't required when we know the exact type.
duke@435 2114   // Also check that the type is an array type.
roland@2728 2115 if (op->expected_type() == NULL) {
duke@435 2116 __ mov(src, O0);
duke@435 2117 __ mov(src_pos, O1);
duke@435 2118 __ mov(dst, O2);
duke@435 2119 __ mov(dst_pos, O3);
duke@435 2120 __ mov(length, O4);
roland@2728 2121 address copyfunc_addr = StubRoutines::generic_arraycopy();
roland@2728 2122
roland@2728 2123 if (copyfunc_addr == NULL) { // Use C version if stub was not generated
roland@2728 2124 __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy));
roland@2728 2125 } else {
roland@2728 2126 #ifndef PRODUCT
roland@2728 2127 if (PrintC1Statistics) {
roland@2728 2128 address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
roland@2728 2129 __ inc_counter(counter, G1, G3);
roland@2728 2130 }
roland@2728 2131 #endif
roland@2728 2132 __ call_VM_leaf(tmp, copyfunc_addr);
roland@2728 2133 }
roland@2728 2134
roland@2728 2135 if (copyfunc_addr != NULL) {
roland@2728 2136 __ xor3(O0, -1, tmp);
roland@2728 2137 __ sub(length, tmp, length);
roland@2728 2138 __ add(src_pos, tmp, src_pos);
kvn@3037 2139 __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
roland@2728 2140 __ delayed()->add(dst_pos, tmp, dst_pos);
roland@2728 2141 } else {
kvn@3037 2142 __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
roland@2728 2143 __ delayed()->nop();
roland@2728 2144 }
duke@435 2145 __ bind(*stub->continuation());
duke@435 2146 return;
duke@435 2147 }
duke@435 2148
duke@435 2149 assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
duke@435 2150
duke@435 2151 // make sure src and dst are non-null and load array length
duke@435 2152 if (flags & LIR_OpArrayCopy::src_null_check) {
duke@435 2153 __ tst(src);
iveresov@2344 2154 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
duke@435 2155 __ delayed()->nop();
duke@435 2156 }
duke@435 2157
duke@435 2158 if (flags & LIR_OpArrayCopy::dst_null_check) {
duke@435 2159 __ tst(dst);
iveresov@2344 2160 __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
duke@435 2161 __ delayed()->nop();
duke@435 2162 }
duke@435 2163
duke@435 2164 if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
duke@435 2165 // test src_pos register
kvn@3037 2166 __ cmp_zero_and_br(Assembler::less, src_pos, *stub->entry());
duke@435 2167 __ delayed()->nop();
duke@435 2168 }
duke@435 2169
duke@435 2170 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
duke@435 2171 // test dst_pos register
kvn@3037 2172 __ cmp_zero_and_br(Assembler::less, dst_pos, *stub->entry());
duke@435 2173 __ delayed()->nop();
duke@435 2174 }
duke@435 2175
duke@435 2176 if (flags & LIR_OpArrayCopy::length_positive_check) {
duke@435 2177 // make sure length isn't negative
kvn@3037 2178 __ cmp_zero_and_br(Assembler::less, length, *stub->entry());
duke@435 2179 __ delayed()->nop();
duke@435 2180 }
duke@435 2181
duke@435 2182 if (flags & LIR_OpArrayCopy::src_range_check) {
duke@435 2183 __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2);
duke@435 2184 __ add(length, src_pos, tmp);
duke@435 2185 __ cmp(tmp2, tmp);
duke@435 2186 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
duke@435 2187 __ delayed()->nop();
duke@435 2188 }
duke@435 2189
duke@435 2190 if (flags & LIR_OpArrayCopy::dst_range_check) {
duke@435 2191 __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2);
duke@435 2192 __ add(length, dst_pos, tmp);
duke@435 2193 __ cmp(tmp2, tmp);
duke@435 2194 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
duke@435 2195 __ delayed()->nop();
duke@435 2196 }
duke@435 2197
roland@2728 2198 int shift = shift_amount(basic_type);
roland@2728 2199
duke@435 2200 if (flags & LIR_OpArrayCopy::type_check) {
roland@2728 2201 // We don't know the array types are compatible
roland@2728 2202 if (basic_type != T_OBJECT) {
roland@2728 2203 // Simple test for basic type arrays
coleenp@4037 2204 if (UseCompressedKlassPointers) {
roland@2728 2205         // We don't need to decode because we just need to compare
roland@2728 2206 __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
roland@2728 2207 __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
roland@2728 2208 __ cmp(tmp, tmp2);
roland@2728 2209 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
roland@2728 2210 } else {
roland@2728 2211 __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
roland@2728 2212 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
roland@2728 2213 __ cmp(tmp, tmp2);
roland@2728 2214 __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
roland@2728 2215 }
roland@2728 2216 __ delayed()->nop();
iveresov@2344 2217 } else {
roland@2728 2218 // For object arrays, if src is a sub class of dst then we can
roland@2728 2219 // safely do the copy.
roland@2728 2220 address copyfunc_addr = StubRoutines::checkcast_arraycopy();
roland@2728 2221
roland@2728 2222 Label cont, slow;
roland@2728 2223 assert_different_registers(tmp, tmp2, G3, G1);
roland@2728 2224
roland@2728 2225 __ load_klass(src, G3);
roland@2728 2226 __ load_klass(dst, G1);
roland@2728 2227
roland@2728 2228 __ check_klass_subtype_fast_path(G3, G1, tmp, tmp2, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL);
roland@2728 2229
roland@2728 2230 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
roland@2728 2231 __ delayed()->nop();
roland@2728 2232
roland@2728 2233 __ cmp(G3, 0);
roland@2728 2234 if (copyfunc_addr != NULL) { // use stub if available
roland@2728 2235 // src is not a sub class of dst so we have to do a
roland@2728 2236 // per-element check.
roland@2728 2237 __ br(Assembler::notEqual, false, Assembler::pt, cont);
roland@2728 2238 __ delayed()->nop();
roland@2728 2239
roland@2728 2240 __ bind(slow);
roland@2728 2241
roland@2728 2242 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
roland@2728 2243 if ((flags & mask) != mask) {
roland@2728 2244           // Check that both of them are object arrays.
roland@2728 2245 assert(flags & mask, "one of the two should be known to be an object array");
roland@2728 2246
roland@2728 2247 if (!(flags & LIR_OpArrayCopy::src_objarray)) {
roland@2728 2248 __ load_klass(src, tmp);
roland@2728 2249 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
roland@2728 2250 __ load_klass(dst, tmp);
roland@2728 2251 }
stefank@3391 2252 int lh_offset = in_bytes(Klass::layout_helper_offset());
roland@2728 2253
roland@2728 2254 __ lduw(tmp, lh_offset, tmp2);
roland@2728 2255
roland@2728 2256 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
roland@2728 2257 __ set(objArray_lh, tmp);
roland@2728 2258 __ cmp(tmp, tmp2);
roland@2728 2259 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
roland@2728 2260 __ delayed()->nop();
roland@2728 2261 }
roland@2728 2262
roland@2728 2263 Register src_ptr = O0;
roland@2728 2264 Register dst_ptr = O1;
roland@2728 2265 Register len = O2;
roland@2728 2266 Register chk_off = O3;
roland@2728 2267 Register super_k = O4;
roland@2728 2268
roland@2728 2269 __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
roland@2728 2270 if (shift == 0) {
roland@2728 2271 __ add(src_ptr, src_pos, src_ptr);
roland@2728 2272 } else {
roland@2728 2273 __ sll(src_pos, shift, tmp);
roland@2728 2274 __ add(src_ptr, tmp, src_ptr);
roland@2728 2275 }
roland@2728 2276
roland@2728 2277 __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
roland@2728 2278 if (shift == 0) {
roland@2728 2279 __ add(dst_ptr, dst_pos, dst_ptr);
roland@2728 2280 } else {
roland@2728 2281 __ sll(dst_pos, shift, tmp);
roland@2728 2282 __ add(dst_ptr, tmp, dst_ptr);
roland@2728 2283 }
roland@2728 2284 __ mov(length, len);
roland@2728 2285 __ load_klass(dst, tmp);
roland@2728 2286
stefank@3391 2287 int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
roland@2728 2288 __ ld_ptr(tmp, ek_offset, super_k);
roland@2728 2289
stefank@3391 2290 int sco_offset = in_bytes(Klass::super_check_offset_offset());
roland@2728 2291 __ lduw(super_k, sco_offset, chk_off);
roland@2728 2292
roland@2728 2293 __ call_VM_leaf(tmp, copyfunc_addr);
roland@2728 2294
roland@2728 2295 #ifndef PRODUCT
roland@2728 2296 if (PrintC1Statistics) {
roland@2728 2297 Label failed;
kvn@3037 2298 __ br_notnull_short(O0, Assembler::pn, failed);
roland@2728 2299 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, G1, G3);
roland@2728 2300 __ bind(failed);
roland@2728 2301 }
roland@2728 2302 #endif
roland@2728 2303
roland@2728 2304 __ br_null(O0, false, Assembler::pt, *stub->continuation());
roland@2728 2305 __ delayed()->xor3(O0, -1, tmp);
roland@2728 2306
roland@2728 2307 #ifndef PRODUCT
roland@2728 2308 if (PrintC1Statistics) {
roland@2728 2309 __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, G1, G3);
roland@2728 2310 }
roland@2728 2311 #endif
roland@2728 2312
roland@2728 2313 __ sub(length, tmp, length);
roland@2728 2314 __ add(src_pos, tmp, src_pos);
roland@2728 2315 __ br(Assembler::always, false, Assembler::pt, *stub->entry());
roland@2728 2316 __ delayed()->add(dst_pos, tmp, dst_pos);
roland@2728 2317
roland@2728 2318 __ bind(cont);
roland@2728 2319 } else {
roland@2728 2320 __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
roland@2728 2321 __ delayed()->nop();
roland@2728 2322 __ bind(cont);
roland@2728 2323 }
iveresov@2344 2324 }
duke@435 2325 }
duke@435 2326
duke@435 2327 #ifdef ASSERT
duke@435 2328 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
duke@435 2329 // Sanity check the known type with the incoming class. For the
duke@435 2330 // primitive case the types must match exactly with src.klass and
duke@435 2331 // dst.klass each exactly matching the default type. For the
duke@435 2332 // object array case, if no type check is needed then either the
duke@435 2333     // subtype which we can't check, or src is the same array as dst
duke@435 2334 // subtype which we can't check or src is the same array as dst
duke@435 2335 // but not necessarily exactly of type default_type.
duke@435 2336 Label known_ok, halt;
coleenp@4037 2337 metadata2reg(op->expected_type()->constant_encoding(), tmp);
coleenp@4037 2338 if (UseCompressedKlassPointers) {
iveresov@2344 2339 // tmp holds the default type. It currently comes uncompressed after the
iveresov@2344 2340 // load of a constant, so encode it.
iveresov@2344 2341 __ encode_heap_oop(tmp);
iveresov@2344 2342       // load the raw (compressed) value of the dst klass, since we
iveresov@2344 2343       // compare the compressed values directly.
iveresov@2344 2344 __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
iveresov@2344 2345 if (basic_type != T_OBJECT) {
iveresov@2344 2346 __ cmp(tmp, tmp2);
iveresov@2344 2347 __ br(Assembler::notEqual, false, Assembler::pn, halt);
iveresov@2344 2348 // load the raw value of the src klass.
iveresov@2344 2349 __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
kvn@3037 2350 __ cmp_and_br_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
iveresov@2344 2351 } else {
iveresov@2344 2352 __ cmp(tmp, tmp2);
iveresov@2344 2353 __ br(Assembler::equal, false, Assembler::pn, known_ok);
iveresov@2344 2354 __ delayed()->cmp(src, dst);
iveresov@2344 2355 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
iveresov@2344 2356 __ delayed()->nop();
iveresov@2344 2357 }
duke@435 2358 } else {
iveresov@2344 2359 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
iveresov@2344 2360 if (basic_type != T_OBJECT) {
iveresov@2344 2361 __ cmp(tmp, tmp2);
iveresov@2344 2362 __ brx(Assembler::notEqual, false, Assembler::pn, halt);
iveresov@2344 2363 __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
kvn@3037 2364 __ cmp_and_brx_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
iveresov@2344 2365 } else {
iveresov@2344 2366 __ cmp(tmp, tmp2);
iveresov@2344 2367 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
iveresov@2344 2368 __ delayed()->cmp(src, dst);
iveresov@2344 2369 __ brx(Assembler::equal, false, Assembler::pn, known_ok);
iveresov@2344 2370 __ delayed()->nop();
iveresov@2344 2371 }
duke@435 2372 }
duke@435 2373 __ bind(halt);
duke@435 2374 __ stop("incorrect type information in arraycopy");
duke@435 2375 __ bind(known_ok);
duke@435 2376 }
duke@435 2377 #endif
duke@435 2378
roland@2728 2379 #ifndef PRODUCT
roland@2728 2380 if (PrintC1Statistics) {
roland@2728 2381 address counter = Runtime1::arraycopy_count_address(basic_type);
roland@2728 2382 __ inc_counter(counter, G1, G3);
roland@2728 2383 }
roland@2728 2384 #endif
duke@435 2385
duke@435 2386 Register src_ptr = O0;
duke@435 2387 Register dst_ptr = O1;
duke@435 2388 Register len = O2;
duke@435 2389
duke@435 2390 __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
duke@435 2391 if (shift == 0) {
duke@435 2392 __ add(src_ptr, src_pos, src_ptr);
duke@435 2393 } else {
duke@435 2394 __ sll(src_pos, shift, tmp);
duke@435 2395 __ add(src_ptr, tmp, src_ptr);
duke@435 2396 }
duke@435 2397
duke@435 2398 __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
duke@435 2399 if (shift == 0) {
duke@435 2400 __ add(dst_ptr, dst_pos, dst_ptr);
duke@435 2401 } else {
duke@435 2402 __ sll(dst_pos, shift, tmp);
duke@435 2403 __ add(dst_ptr, tmp, dst_ptr);
duke@435 2404 }
duke@435 2405
roland@2728 2406 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
roland@2728 2407 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
roland@2728 2408 const char *name;
roland@2728 2409 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
roland@2728 2410
roland@2728 2411   // arraycopy stubs take a length in number of elements, so don't scale it.
roland@2728 2412 __ mov(length, len);
roland@2728 2413 __ call_VM_leaf(tmp, entry);
duke@435 2414
duke@435 2415 __ bind(*stub->continuation());
duke@435 2416 }
duke@435 2417
duke@435 2418
duke@435 2419 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
duke@435 2420 if (dest->is_single_cpu()) {
duke@435 2421 #ifdef _LP64
duke@435 2422 if (left->type() == T_OBJECT) {
duke@435 2423 switch (code) {
duke@435 2424 case lir_shl: __ sllx (left->as_register(), count->as_register(), dest->as_register()); break;
duke@435 2425 case lir_shr: __ srax (left->as_register(), count->as_register(), dest->as_register()); break;
duke@435 2426 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
duke@435 2427 default: ShouldNotReachHere();
duke@435 2428 }
duke@435 2429 } else
duke@435 2430 #endif
duke@435 2431 switch (code) {
duke@435 2432 case lir_shl: __ sll (left->as_register(), count->as_register(), dest->as_register()); break;
duke@435 2433 case lir_shr: __ sra (left->as_register(), count->as_register(), dest->as_register()); break;
duke@435 2434 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break;
duke@435 2435 default: ShouldNotReachHere();
duke@435 2436 }
duke@435 2437 } else {
duke@435 2438 #ifdef _LP64
duke@435 2439 switch (code) {
duke@435 2440 case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
duke@435 2441 case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
duke@435 2442 case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
duke@435 2443 default: ShouldNotReachHere();
duke@435 2444 }
duke@435 2445 #else
duke@435 2446 switch (code) {
duke@435 2447 case lir_shl: __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
duke@435 2448 case lir_shr: __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
duke@435 2449 case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
duke@435 2450 default: ShouldNotReachHere();
duke@435 2451 }
duke@435 2452 #endif
duke@435 2453 }
duke@435 2454 }
duke@435 2455
duke@435 2456
duke@435 2457 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
duke@435 2458 #ifdef _LP64
duke@435 2459 if (left->type() == T_OBJECT) {
duke@435 2460     count = count & 63;  // shouldn't shift by more than the bit width of intptr_t
duke@435 2461 Register l = left->as_register();
duke@435 2462 Register d = dest->as_register_lo();
duke@435 2463 switch (code) {
duke@435 2464 case lir_shl: __ sllx (l, count, d); break;
duke@435 2465 case lir_shr: __ srax (l, count, d); break;
duke@435 2466 case lir_ushr: __ srlx (l, count, d); break;
duke@435 2467 default: ShouldNotReachHere();
duke@435 2468 }
duke@435 2469 return;
duke@435 2470 }
duke@435 2471 #endif
duke@435 2472
duke@435 2473 if (dest->is_single_cpu()) {
duke@435 2474 count = count & 0x1F; // Java spec
duke@435 2475 switch (code) {
duke@435 2476 case lir_shl: __ sll (left->as_register(), count, dest->as_register()); break;
duke@435 2477 case lir_shr: __ sra (left->as_register(), count, dest->as_register()); break;
duke@435 2478 case lir_ushr: __ srl (left->as_register(), count, dest->as_register()); break;
duke@435 2479 default: ShouldNotReachHere();
duke@435 2480 }
duke@435 2481 } else if (dest->is_double_cpu()) {
duke@435 2482 count = count & 63; // Java spec
duke@435 2483 switch (code) {
duke@435 2484 case lir_shl: __ sllx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
duke@435 2485 case lir_shr: __ srax (left->as_pointer_register(), count, dest->as_pointer_register()); break;
duke@435 2486 case lir_ushr: __ srlx (left->as_pointer_register(), count, dest->as_pointer_register()); break;
duke@435 2487 default: ShouldNotReachHere();
duke@435 2488 }
duke@435 2489 } else {
duke@435 2490 ShouldNotReachHere();
duke@435 2491 }
duke@435 2492 }
duke@435 2493
duke@435 2494
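// emit_alloc_obj relies on a fixed register contract (G1/G3/G4 as temps,
// G5 holding the klass, O0 receiving the result). If an init check is
// required, InstanceKlass::init_state is tested and anything not fully
// initialized branches to the slow-path stub before the inline allocation.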
duke@435 2495 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
duke@435 2496 assert(op->tmp1()->as_register() == G1 &&
duke@435 2497 op->tmp2()->as_register() == G3 &&
duke@435 2498 op->tmp3()->as_register() == G4 &&
duke@435 2499 op->obj()->as_register() == O0 &&
duke@435 2500 op->klass()->as_register() == G5, "must be");
duke@435 2501 if (op->init_check()) {
coleenp@3368 2502 __ ldub(op->klass()->as_register(),
coleenp@4037 2503 in_bytes(InstanceKlass::init_state_offset()),
duke@435 2504 op->tmp1()->as_register());
duke@435 2505 add_debug_info_for_null_check_here(op->stub()->info());
coleenp@4037 2506 __ cmp(op->tmp1()->as_register(), InstanceKlass::fully_initialized);
duke@435 2507 __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
duke@435 2508 __ delayed()->nop();
duke@435 2509 }
duke@435 2510 __ allocate_object(op->obj()->as_register(),
duke@435 2511 op->tmp1()->as_register(),
duke@435 2512 op->tmp2()->as_register(),
duke@435 2513 op->tmp3()->as_register(),
duke@435 2514 op->header_size(),
duke@435 2515 op->object_size(),
duke@435 2516 op->klass()->as_register(),
duke@435 2517 *op->stub()->entry());
duke@435 2518 __ bind(*op->stub()->continuation());
duke@435 2519 __ verify_oop(op->obj()->as_register());
duke@435 2520 }
duke@435 2521
duke@435 2522
duke@435 2523 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
duke@435 2524 assert(op->tmp1()->as_register() == G1 &&
duke@435 2525 op->tmp2()->as_register() == G3 &&
duke@435 2526 op->tmp3()->as_register() == G4 &&
duke@435 2527 op->tmp4()->as_register() == O1 &&
duke@435 2528 op->klass()->as_register() == G5, "must be");
iveresov@2432 2529
iveresov@2432 2530 LP64_ONLY( __ signx(op->len()->as_register()); )
duke@435 2531 if (UseSlowPath ||
duke@435 2532 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
duke@435 2533 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
never@1813 2534 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
duke@435 2535 __ delayed()->nop();
duke@435 2536 } else {
duke@435 2537 __ allocate_array(op->obj()->as_register(),
duke@435 2538 op->len()->as_register(),
duke@435 2539 op->tmp1()->as_register(),
duke@435 2540 op->tmp2()->as_register(),
duke@435 2541 op->tmp3()->as_register(),
duke@435 2542 arrayOopDesc::header_size(op->type()),
kvn@464 2543 type2aelembytes(op->type()),
duke@435 2544 op->klass()->as_register(),
duke@435 2545 *op->stub()->entry());
duke@435 2546 }
duke@435 2547 __ bind(*op->stub()->continuation());
duke@435 2548 }
duke@435 2549
duke@435 2550
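// Helper for receiver-type profiling: a ReceiverTypeData/VirtualCallData record
// holds row_limit() (receiver klass, count) pairs.  The first loop below bumps
// the count of the row whose receiver matches 'recv'; the second loop claims the
// first empty row for a receiver that has not been seen before.  In both cases
// control transfers to 'update_done'; falling out of the helper means every row
// was already occupied by some other receiver.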
iveresov@2138 2551 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
iveresov@2138 2552 ciMethodData *md, ciProfileData *data,
iveresov@2138 2553 Register recv, Register tmp1, Label* update_done) {
iveresov@2138 2554 uint i;
iveresov@2138 2555 for (i = 0; i < VirtualCallData::row_limit(); i++) {
iveresov@2138 2556 Label next_test;
iveresov@2138 2557 // See if the receiver is receiver[n].
iveresov@2138 2558 Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
iveresov@2138 2559 mdo_offset_bias);
iveresov@2138 2560 __ ld_ptr(receiver_addr, tmp1);
iveresov@2138 2561 __ verify_oop(tmp1);
kvn@3037 2562 __ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test);
iveresov@2138 2563 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
iveresov@2138 2564 mdo_offset_bias);
iveresov@2138 2565 __ ld_ptr(data_addr, tmp1);
iveresov@2138 2566 __ add(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2138 2567 __ st_ptr(tmp1, data_addr);
kvn@3037 2568 __ ba(*update_done);
iveresov@2138 2569 __ delayed()->nop();
iveresov@2138 2570 __ bind(next_test);
iveresov@2138 2571 }
iveresov@2138 2572
iveresov@2138 2573 // Didn't find receiver; find next empty slot and fill it in
iveresov@2138 2574 for (i = 0; i < VirtualCallData::row_limit(); i++) {
iveresov@2138 2575 Label next_test;
iveresov@2138 2576 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
iveresov@2138 2577 mdo_offset_bias);
iveresov@2344 2578 __ ld_ptr(recv_addr, tmp1);
kvn@3037 2579 __ br_notnull_short(tmp1, Assembler::pt, next_test);
iveresov@2138 2580 __ st_ptr(recv, recv_addr);
iveresov@2138 2581 __ set(DataLayout::counter_increment, tmp1);
iveresov@2138 2582 __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
iveresov@2138 2583 mdo_offset_bias);
kvn@3037 2584 __ ba(*update_done);
iveresov@2138 2585 __ delayed()->nop();
iveresov@2138 2586 __ bind(next_test);
iveresov@2138 2587 }
iveresov@2138 2588 }
iveresov@2138 2589
iveresov@2146 2590
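// SPARC loads and stores take only a signed 13-bit immediate displacement, so
// when a profile slot lies beyond the simm13 range the MDO base register is
// rebased by 'mdo_offset_bias' and every slot offset is adjusted by the same
// amount (see the '- mdo_offset_bias' terms used by the callers).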
iveresov@2146 2591 void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
iveresov@2146 2592 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
iveresov@2349 2593 md = method->method_data_or_null();
iveresov@2349 2594 assert(md != NULL, "Sanity");
iveresov@2146 2595 data = md->bci_to_data(bci);
iveresov@2146 2596 assert(data != NULL, "need data for checkcast");
iveresov@2146 2597 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
iveresov@2146 2598 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
iveresov@2146 2599 // The offset is large so bias the mdo by the base of the slot so
iveresov@2146 2600 // that the ld can use simm13s to reference the slots of the data
iveresov@2146 2601 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
iveresov@2146 2602 }
iveresov@2146 2603 }
iveresov@2146 2604
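// Shared type-check generator used by checkcast and instanceof.  The caller
// supplies three continuation labels: 'success', 'failure' and 'obj_is_null'.
// When profiling is enabled, the null-seen flag and the receiver-type rows of
// the MDO are updated before branching to the corresponding label.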
iveresov@2146 2605 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
iveresov@2138 2606 // we always need a stub for the failure case.
iveresov@2138 2607 CodeStub* stub = op->stub();
iveresov@2138 2608 Register obj = op->object()->as_register();
iveresov@2138 2609 Register k_RInfo = op->tmp1()->as_register();
iveresov@2138 2610 Register klass_RInfo = op->tmp2()->as_register();
iveresov@2138 2611 Register dst = op->result_opr()->as_register();
iveresov@2138 2612 Register Rtmp1 = op->tmp3()->as_register();
iveresov@2138 2613 ciKlass* k = op->klass();
iveresov@2138 2614
iveresov@2138 2615
iveresov@2138 2616 if (obj == k_RInfo) {
iveresov@2138 2617 k_RInfo = klass_RInfo;
iveresov@2138 2618 klass_RInfo = obj;
iveresov@2138 2619 }
iveresov@2138 2620
iveresov@2138 2621 ciMethodData* md;
iveresov@2138 2622 ciProfileData* data;
iveresov@2138 2623 int mdo_offset_bias = 0;
iveresov@2138 2624 if (op->should_profile()) {
iveresov@2138 2625 ciMethod* method = op->profiled_method();
iveresov@2138 2626 assert(method != NULL, "Should have method");
iveresov@2146 2627 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
iveresov@2146 2628
iveresov@2146 2629 Label not_null;
kvn@3037 2630 __ br_notnull_short(obj, Assembler::pn, not_null);
iveresov@2138 2631 Register mdo = k_RInfo;
iveresov@2138 2632 Register data_val = Rtmp1;
coleenp@4037 2633 metadata2reg(md->constant_encoding(), mdo);
iveresov@2138 2634 if (mdo_offset_bias > 0) {
iveresov@2138 2635 __ set(mdo_offset_bias, data_val);
iveresov@2138 2636 __ add(mdo, data_val, mdo);
iveresov@2138 2637 }
iveresov@2138 2638 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
iveresov@2138 2639 __ ldub(flags_addr, data_val);
iveresov@2138 2640 __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
iveresov@2138 2641 __ stb(data_val, flags_addr);
kvn@3037 2642 __ ba(*obj_is_null);
iveresov@2146 2643 __ delayed()->nop();
iveresov@2146 2644 __ bind(not_null);
iveresov@2146 2645 } else {
iveresov@2146 2646 __ br_null(obj, false, Assembler::pn, *obj_is_null);
iveresov@2146 2647 __ delayed()->nop();
iveresov@2138 2648 }
iveresov@2146 2649
iveresov@2146 2650 Label profile_cast_failure, profile_cast_success;
iveresov@2146 2651 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
iveresov@2146 2652 Label *success_target = op->should_profile() ? &profile_cast_success : success;
iveresov@2138 2653
iveresov@2138 2654 // patching may screw with our temporaries on sparc,
iveresov@2138 2655 // so let's do it before loading the class
iveresov@2138 2656 if (k->is_loaded()) {
coleenp@4037 2657 metadata2reg(k->constant_encoding(), k_RInfo);
iveresov@2138 2658 } else {
coleenp@4037 2659 klass2reg_with_patching(k_RInfo, op->info_for_patch());
iveresov@2138 2660 }
iveresov@2138 2661 assert(obj != k_RInfo, "must be different");
iveresov@2138 2662
iveresov@2138 2663 // get object class
iveresov@2138 2664 // not a safepoint as obj null check happens earlier
iveresov@2344 2665 __ load_klass(obj, klass_RInfo);
iveresov@2138 2666 if (op->fast_check()) {
iveresov@2138 2667 assert_different_registers(klass_RInfo, k_RInfo);
iveresov@2138 2668 __ cmp(k_RInfo, klass_RInfo);
iveresov@2138 2669 __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target);
iveresov@2138 2670 __ delayed()->nop();
iveresov@2138 2671 } else {
iveresov@2138 2672 bool need_slow_path = true;
iveresov@2138 2673 if (k->is_loaded()) {
stefank@3391 2674 if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset()))
iveresov@2138 2675 need_slow_path = false;
iveresov@2138 2676 // perform the fast part of the checking logic
iveresov@2138 2677 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
iveresov@2146 2678 (need_slow_path ? success_target : NULL),
iveresov@2138 2679 failure_target, NULL,
iveresov@2138 2680 RegisterOrConstant(k->super_check_offset()));
iveresov@2138 2681 } else {
iveresov@2138 2682 // perform the fast part of the checking logic
iveresov@2146 2683 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target,
iveresov@2138 2684 failure_target, NULL);
iveresov@2138 2685 }
iveresov@2138 2686 if (need_slow_path) {
iveresov@2138 2687 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
iveresov@2138 2688 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
iveresov@2138 2689 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
iveresov@2138 2690 __ delayed()->nop();
iveresov@2138 2691 __ cmp(G3, 0);
iveresov@2138 2692 __ br(Assembler::equal, false, Assembler::pn, *failure_target);
iveresov@2138 2693 __ delayed()->nop();
iveresov@2146 2694 // Fall through to success case
iveresov@2138 2695 }
iveresov@2138 2696 }
iveresov@2138 2697
iveresov@2138 2698 if (op->should_profile()) {
iveresov@2138 2699 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
iveresov@2138 2700 assert_different_registers(obj, mdo, recv, tmp1);
iveresov@2146 2701 __ bind(profile_cast_success);
coleenp@4037 2702 metadata2reg(md->constant_encoding(), mdo);
iveresov@2138 2703 if (mdo_offset_bias > 0) {
iveresov@2138 2704 __ set(mdo_offset_bias, tmp1);
iveresov@2138 2705 __ add(mdo, tmp1, mdo);
iveresov@2138 2706 }
iveresov@2344 2707 __ load_klass(obj, recv);
iveresov@2146 2708 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
iveresov@2138 2709 // Jump over the failure case
kvn@3037 2710 __ ba(*success);
iveresov@2138 2711 __ delayed()->nop();
iveresov@2138 2712 // Cast failure case
iveresov@2138 2713 __ bind(profile_cast_failure);
coleenp@4037 2714 metadata2reg(md->constant_encoding(), mdo);
iveresov@2138 2715 if (mdo_offset_bias > 0) {
iveresov@2138 2716 __ set(mdo_offset_bias, tmp1);
iveresov@2138 2717 __ add(mdo, tmp1, mdo);
iveresov@2138 2718 }
iveresov@2138 2719 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
iveresov@2138 2720 __ ld_ptr(data_addr, tmp1);
iveresov@2138 2721 __ sub(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2138 2722 __ st_ptr(tmp1, data_addr);
kvn@3037 2723 __ ba(*failure);
iveresov@2138 2724 __ delayed()->nop();
iveresov@2138 2725 }
kvn@3037 2726 __ ba(*success);
iveresov@2146 2727 __ delayed()->nop();
iveresov@2138 2728 }
iveresov@2138 2729
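// LIR type checks.  lir_store_check verifies that 'value' may be stored into
// 'array' by subtype-checking value's klass against the array's element klass;
// lir_checkcast and lir_instanceof are handled through emit_typecheck_helper
// above and differ only in what happens at the success/failure labels.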
duke@435 2730 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
duke@435 2731 LIR_Code code = op->code();
duke@435 2732 if (code == lir_store_check) {
duke@435 2733 Register value = op->object()->as_register();
duke@435 2734 Register array = op->array()->as_register();
duke@435 2735 Register k_RInfo = op->tmp1()->as_register();
duke@435 2736 Register klass_RInfo = op->tmp2()->as_register();
duke@435 2737 Register Rtmp1 = op->tmp3()->as_register();
duke@435 2738
duke@435 2739 __ verify_oop(value);
duke@435 2740 CodeStub* stub = op->stub();
iveresov@2146 2741 // check if it needs to be profiled
iveresov@2146 2742 ciMethodData* md;
iveresov@2146 2743 ciProfileData* data;
iveresov@2146 2744 int mdo_offset_bias = 0;
iveresov@2146 2745 if (op->should_profile()) {
iveresov@2146 2746 ciMethod* method = op->profiled_method();
iveresov@2146 2747 assert(method != NULL, "Should have method");
iveresov@2146 2748 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
iveresov@2146 2749 }
iveresov@2146 2750 Label profile_cast_success, profile_cast_failure, done;
iveresov@2146 2751 Label *success_target = op->should_profile() ? &profile_cast_success : &done;
iveresov@2146 2752 Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
iveresov@2146 2753
iveresov@2146 2754 if (op->should_profile()) {
iveresov@2146 2755 Label not_null;
kvn@3037 2756 __ br_notnull_short(value, Assembler::pn, not_null);
iveresov@2146 2757 Register mdo = k_RInfo;
iveresov@2146 2758 Register data_val = Rtmp1;
coleenp@4037 2759 metadata2reg(md->constant_encoding(), mdo);
iveresov@2146 2760 if (mdo_offset_bias > 0) {
iveresov@2146 2761 __ set(mdo_offset_bias, data_val);
iveresov@2146 2762 __ add(mdo, data_val, mdo);
iveresov@2146 2763 }
iveresov@2146 2764 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
iveresov@2146 2765 __ ldub(flags_addr, data_val);
iveresov@2146 2766 __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
iveresov@2146 2767 __ stb(data_val, flags_addr);
kvn@3037 2768 __ ba_short(done);
iveresov@2146 2769 __ bind(not_null);
iveresov@2146 2770 } else {
kvn@3037 2771 __ br_null_short(value, Assembler::pn, done);
iveresov@2146 2772 }
iveresov@2344 2773 add_debug_info_for_null_check_here(op->info_for_exception());
iveresov@2344 2774 __ load_klass(array, k_RInfo);
iveresov@2344 2775 __ load_klass(value, klass_RInfo);
duke@435 2776
duke@435 2777 // get instance klass
stefank@3391 2778 __ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset()), k_RInfo);
jrose@1079 2779 // perform the fast part of the checking logic
iveresov@2146 2780 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);
jrose@1079 2781
jrose@1079 2782 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
jrose@1079 2783 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
duke@435 2784 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
duke@435 2785 __ delayed()->nop();
duke@435 2786 __ cmp(G3, 0);
iveresov@2146 2787 __ br(Assembler::equal, false, Assembler::pn, *failure_target);
duke@435 2788 __ delayed()->nop();
iveresov@2146 2789 // fall through to the success case
iveresov@2146 2790
iveresov@2146 2791 if (op->should_profile()) {
iveresov@2146 2792 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
iveresov@2146 2793 assert_different_registers(value, mdo, recv, tmp1);
iveresov@2146 2794 __ bind(profile_cast_success);
coleenp@4037 2795 metadata2reg(md->constant_encoding(), mdo);
iveresov@2146 2796 if (mdo_offset_bias > 0) {
iveresov@2146 2797 __ set(mdo_offset_bias, tmp1);
iveresov@2146 2798 __ add(mdo, tmp1, mdo);
iveresov@2146 2799 }
iveresov@2344 2800 __ load_klass(value, recv);
iveresov@2146 2801 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
kvn@3037 2802 __ ba_short(done);
iveresov@2146 2803 // Cast failure case
iveresov@2146 2804 __ bind(profile_cast_failure);
coleenp@4037 2805 metadata2reg(md->constant_encoding(), mdo);
iveresov@2146 2806 if (mdo_offset_bias > 0) {
iveresov@2146 2807 __ set(mdo_offset_bias, tmp1);
iveresov@2146 2808 __ add(mdo, tmp1, mdo);
iveresov@2146 2809 }
iveresov@2146 2810 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
iveresov@2146 2811 __ ld_ptr(data_addr, tmp1);
iveresov@2146 2812 __ sub(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2146 2813 __ st_ptr(tmp1, data_addr);
kvn@3037 2814 __ ba(*stub->entry());
iveresov@2146 2815 __ delayed()->nop();
iveresov@2146 2816 }
duke@435 2817 __ bind(done);
iveresov@2146 2818 } else if (code == lir_checkcast) {
iveresov@2146 2819 Register obj = op->object()->as_register();
iveresov@2146 2820 Register dst = op->result_opr()->as_register();
iveresov@2146 2821 Label success;
iveresov@2146 2822 emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
iveresov@2146 2823 __ bind(success);
iveresov@2146 2824 __ mov(obj, dst);
duke@435 2825 } else if (code == lir_instanceof) {
duke@435 2826 Register obj = op->object()->as_register();
duke@435 2827 Register dst = op->result_opr()->as_register();
iveresov@2146 2828 Label success, failure, done;
iveresov@2146 2829 emit_typecheck_helper(op, &success, &failure, &failure);
iveresov@2146 2830 __ bind(failure);
iveresov@2146 2831 __ set(0, dst);
kvn@3037 2832 __ ba_short(done);
iveresov@2146 2833 __ bind(success);
iveresov@2146 2834 __ set(1, dst);
iveresov@2146 2835 __ bind(done);
duke@435 2836 } else {
duke@435 2837 ShouldNotReachHere();
duke@435 2838 }
duke@435 2839
duke@435 2840 }
duke@435 2841
duke@435 2842
duke@435 2843 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
duke@435 2844 if (op->code() == lir_cas_long) {
duke@435 2845 assert(VM_Version::supports_cx8(), "wrong machine");
duke@435 2846 Register addr = op->addr()->as_pointer_register();
duke@435 2847 Register cmp_value_lo = op->cmp_value()->as_register_lo();
duke@435 2848 Register cmp_value_hi = op->cmp_value()->as_register_hi();
duke@435 2849 Register new_value_lo = op->new_value()->as_register_lo();
duke@435 2850 Register new_value_hi = op->new_value()->as_register_hi();
duke@435 2851 Register t1 = op->tmp1()->as_register();
duke@435 2852 Register t2 = op->tmp2()->as_register();
duke@435 2853 #ifdef _LP64
duke@435 2854 __ mov(cmp_value_lo, t1);
duke@435 2855 __ mov(new_value_lo, t2);
iveresov@2412 2856 // perform the compare and swap operation
iveresov@2412 2857 __ casx(addr, t1, t2);
iveresov@2412 2858 // generate condition code - if the swap succeeded, t2 ("new value" reg) was
iveresov@2412 2859 // overwritten with the original value in "addr" and will be equal to t1.
iveresov@2412 2860 __ cmp(t1, t2);
duke@435 2861 #else
duke@435 2862 // move high and low halves of long values into single registers
duke@435 2863 __ sllx(cmp_value_hi, 32, t1); // shift high half into temp reg
duke@435 2864 __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half
duke@435 2865 __ or3(t1, cmp_value_lo, t1); // t1 holds 64-bit compare value
duke@435 2866 __ sllx(new_value_hi, 32, t2);
duke@435 2867 __ srl(new_value_lo, 0, new_value_lo);
duke@435 2868 __ or3(t2, new_value_lo, t2); // t2 holds 64-bit value to swap
duke@435 2869 // perform the compare and swap operation
duke@435 2870 __ casx(addr, t1, t2);
duke@435 2871 // generate condition code - if the swap succeeded, t2 ("new value" reg) was
duke@435 2872 // overwritten with the original value in "addr" and will be equal to t1.
iveresov@2412 2873 // Produce icc flag for 32bit.
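// (t2 receives the full 64-bit difference t1 - t2; srlx copies its high word
// into t1 and orcc ORs the two words, so the 32-bit icc Z flag ends up set
// exactly when the original 64-bit values were equal.)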
iveresov@2412 2874 __ sub(t1, t2, t2);
iveresov@2412 2875 __ srlx(t2, 32, t1);
iveresov@2412 2876 __ orcc(t2, t1, G0);
iveresov@2412 2877 #endif
duke@435 2878 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
duke@435 2879 Register addr = op->addr()->as_pointer_register();
duke@435 2880 Register cmp_value = op->cmp_value()->as_register();
duke@435 2881 Register new_value = op->new_value()->as_register();
duke@435 2882 Register t1 = op->tmp1()->as_register();
duke@435 2883 Register t2 = op->tmp2()->as_register();
duke@435 2884 __ mov(cmp_value, t1);
duke@435 2885 __ mov(new_value, t2);
duke@435 2886 if (op->code() == lir_cas_obj) {
iveresov@2344 2887 if (UseCompressedOops) {
iveresov@2344 2888 __ encode_heap_oop(t1);
iveresov@2344 2889 __ encode_heap_oop(t2);
duke@435 2890 __ cas(addr, t1, t2);
iveresov@2344 2891 } else {
never@2352 2892 __ cas_ptr(addr, t1, t2);
duke@435 2893 }
iveresov@2344 2894 } else {
iveresov@2344 2895 __ cas(addr, t1, t2);
iveresov@2344 2896 }
duke@435 2897 __ cmp(t1, t2);
duke@435 2898 } else {
duke@435 2899 Unimplemented();
duke@435 2900 }
duke@435 2901 }
duke@435 2902
duke@435 2903 void LIR_Assembler::set_24bit_FPU() {
duke@435 2904 Unimplemented();
duke@435 2905 }
duke@435 2906
duke@435 2907
duke@435 2908 void LIR_Assembler::reset_FPU() {
duke@435 2909 Unimplemented();
duke@435 2910 }
duke@435 2911
duke@435 2912
duke@435 2913 void LIR_Assembler::breakpoint() {
duke@435 2914 __ breakpoint_trap();
duke@435 2915 }
duke@435 2916
duke@435 2917
duke@435 2918 void LIR_Assembler::push(LIR_Opr opr) {
duke@435 2919 Unimplemented();
duke@435 2920 }
duke@435 2921
duke@435 2922
duke@435 2923 void LIR_Assembler::pop(LIR_Opr opr) {
duke@435 2924 Unimplemented();
duke@435 2925 }
duke@435 2926
duke@435 2927
duke@435 2928 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
duke@435 2929 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
duke@435 2930 Register dst = dst_opr->as_register();
duke@435 2931 Register reg = mon_addr.base();
duke@435 2932 int offset = mon_addr.disp();
duke@435 2933 // compute pointer to BasicLock
duke@435 2934 if (mon_addr.is_simm13()) {
duke@435 2935 __ add(reg, offset, dst);
duke@435 2936 } else {
duke@435 2937 __ set(offset, dst);
duke@435 2938 __ add(dst, reg, dst);
duke@435 2939 }
duke@435 2940 }
duke@435 2941
duke@435 2942
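// Monitor enter/exit.  With UseFastLocking an inline fast path is emitted via
// lock_object/unlock_object; the displaced-header field of the BasicLock is
// asserted to be at offset 0 so the lock register can be used directly as the
// address of the displaced header.  Without fast locking, control branches
// straight to the slow-path stub, and in all cases execution resumes at the
// stub's continuation.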
duke@435 2943 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
duke@435 2944 Register obj = op->obj_opr()->as_register();
duke@435 2945 Register hdr = op->hdr_opr()->as_register();
duke@435 2946 Register lock = op->lock_opr()->as_register();
duke@435 2947
duke@435 2948 // obj may not be an oop
duke@435 2949 if (op->code() == lir_lock) {
duke@435 2950 MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
duke@435 2951 if (UseFastLocking) {
duke@435 2952 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
duke@435 2953 // add debug info for NullPointerException only if one is possible
duke@435 2954 if (op->info() != NULL) {
duke@435 2955 add_debug_info_for_null_check_here(op->info());
duke@435 2956 }
duke@435 2957 __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
duke@435 2958 } else {
duke@435 2959 // always do slow locking
duke@435 2960 // note: the slow locking code could be inlined here, however if we use
duke@435 2961 // slow locking, speed doesn't matter anyway and this solution is
duke@435 2962 // simpler and requires less duplicated code - additionally, the
duke@435 2963 // slow locking code is the same in either case which simplifies
duke@435 2964 // debugging
duke@435 2965 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
duke@435 2966 __ delayed()->nop();
duke@435 2967 }
duke@435 2968 } else {
duke@435 2969 assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
duke@435 2970 if (UseFastLocking) {
duke@435 2971 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
duke@435 2972 __ unlock_object(hdr, obj, lock, *op->stub()->entry());
duke@435 2973 } else {
duke@435 2974 // always do slow unlocking
duke@435 2975 // note: the slow unlocking code could be inlined here, however if we use
duke@435 2976 // slow unlocking, speed doesn't matter anyway and this solution is
duke@435 2977 // simpler and requires less duplicated code - additionally, the
duke@435 2978 // slow unlocking code is the same in either case which simplifies
duke@435 2979 // debugging
duke@435 2980 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
duke@435 2981 __ delayed()->nop();
duke@435 2982 }
duke@435 2983 }
duke@435 2984 __ bind(*op->stub()->continuation());
duke@435 2985 }
duke@435 2986
duke@435 2987
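// Call profiling: unconditionally bump the CounterData count for the call site,
// and for invokevirtual/invokeinterface additionally record receiver types.
// When the receiver class is statically known (known_holder), the matching row
// of the VirtualCallData is selected at compile time and only its counter is
// incremented at run time, avoiding the dynamic receiver tests.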
duke@435 2988 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
duke@435 2989 ciMethod* method = op->profiled_method();
duke@435 2990 int bci = op->profiled_bci();
twisti@3969 2991 ciMethod* callee = op->profiled_callee();
duke@435 2992
duke@435 2993 // Update counter for all call types
iveresov@2349 2994 ciMethodData* md = method->method_data_or_null();
iveresov@2349 2995 assert(md != NULL, "Sanity");
duke@435 2996 ciProfileData* data = md->bci_to_data(bci);
duke@435 2997 assert(data->is_CounterData(), "need CounterData for calls");
duke@435 2998 assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
iveresov@2138 2999 Register mdo = op->mdo()->as_register();
iveresov@2138 3000 #ifdef _LP64
iveresov@2138 3001 assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
iveresov@2138 3002 Register tmp1 = op->tmp1()->as_register_lo();
iveresov@2138 3003 #else
duke@435 3004 assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
duke@435 3005 Register tmp1 = op->tmp1()->as_register();
iveresov@2138 3006 #endif
coleenp@4037 3007 metadata2reg(md->constant_encoding(), mdo);
duke@435 3008 int mdo_offset_bias = 0;
duke@435 3009 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
duke@435 3010 data->size_in_bytes())) {
duke@435 3011 // The offset is large so bias the mdo by the base of the slot so
duke@435 3012 // that the ld can use simm13s to reference the slots of the data
duke@435 3013 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
duke@435 3014 __ set(mdo_offset_bias, O7);
duke@435 3015 __ add(mdo, O7, mdo);
duke@435 3016 }
duke@435 3017
twisti@1162 3018 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
duke@435 3019 Bytecodes::Code bc = method->java_code_at_bci(bci);
twisti@3969 3020 const bool callee_is_static = callee->is_loaded() && callee->is_static();
duke@435 3021 // Perform additional virtual call profiling for invokevirtual and
duke@435 3022 // invokeinterface bytecodes
duke@435 3023 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
twisti@3969 3024 !callee_is_static && // required for optimized MH invokes
iveresov@2138 3025 C1ProfileVirtualCalls) {
duke@435 3026 assert(op->recv()->is_single_cpu(), "recv must be allocated");
duke@435 3027 Register recv = op->recv()->as_register();
duke@435 3028 assert_different_registers(mdo, tmp1, recv);
duke@435 3029 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
duke@435 3030 ciKlass* known_klass = op->known_holder();
iveresov@2138 3031 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
duke@435 3032 // We know the type that will be seen at this call site; we can
coleenp@4037 3033 // statically update the MethodData* rather than needing to do
duke@435 3034 // dynamic tests on the receiver type
duke@435 3035
duke@435 3036 // NOTE: we should probably put a lock around this search to
duke@435 3037 // avoid collisions by concurrent compilations
duke@435 3038 ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
duke@435 3039 uint i;
duke@435 3040 for (i = 0; i < VirtualCallData::row_limit(); i++) {
duke@435 3041 ciKlass* receiver = vc_data->receiver(i);
duke@435 3042 if (known_klass->equals(receiver)) {
twisti@1162 3043 Address data_addr(mdo, md->byte_offset_of_slot(data,
twisti@1162 3044 VirtualCallData::receiver_count_offset(i)) -
duke@435 3045 mdo_offset_bias);
iveresov@2138 3046 __ ld_ptr(data_addr, tmp1);
duke@435 3047 __ add(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2138 3048 __ st_ptr(tmp1, data_addr);
duke@435 3049 return;
duke@435 3050 }
duke@435 3051 }
duke@435 3052
duke@435 3053 // Receiver type not found in profile data; select an empty slot
duke@435 3054
duke@435 3055 // Note that this is less efficient than it should be because it
duke@435 3056 // always does a write to the receiver part of the
duke@435 3057 // VirtualCallData rather than just the first time
duke@435 3058 for (i = 0; i < VirtualCallData::row_limit(); i++) {
duke@435 3059 ciKlass* receiver = vc_data->receiver(i);
duke@435 3060 if (receiver == NULL) {
twisti@1162 3061 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
duke@435 3062 mdo_offset_bias);
coleenp@4037 3063 metadata2reg(known_klass->constant_encoding(), tmp1);
duke@435 3064 __ st_ptr(tmp1, recv_addr);
twisti@1162 3065 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
duke@435 3066 mdo_offset_bias);
iveresov@2138 3067 __ ld_ptr(data_addr, tmp1);
duke@435 3068 __ add(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2138 3069 __ st_ptr(tmp1, data_addr);
duke@435 3070 return;
duke@435 3071 }
duke@435 3072 }
duke@435 3073 } else {
iveresov@2344 3074 __ load_klass(recv, recv);
duke@435 3075 Label update_done;
iveresov@2138 3076 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
kvn@1686 3077 // Receiver did not match any saved receiver and there is no empty row for it.
kvn@1686 3078 // Increment total counter to indicate polymorphic case.
iveresov@2138 3079 __ ld_ptr(counter_addr, tmp1);
kvn@1686 3080 __ add(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2138 3081 __ st_ptr(tmp1, counter_addr);
duke@435 3082
duke@435 3083 __ bind(update_done);
duke@435 3084 }
kvn@1686 3085 } else {
kvn@1686 3086 // Static call
iveresov@2138 3087 __ ld_ptr(counter_addr, tmp1);
kvn@1686 3088 __ add(tmp1, DataLayout::counter_increment, tmp1);
iveresov@2138 3089 __ st_ptr(tmp1, counter_addr);
duke@435 3090 }
duke@435 3091 }
duke@435 3092
duke@435 3093 void LIR_Assembler::align_backward_branch_target() {
kvn@1800 3094 __ align(OptoLoopAlignment);
duke@435 3095 }
duke@435 3096
duke@435 3097
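// On SPARC every branch and call has a delay slot: the instruction immediately
// following it is executed before the transfer of control takes effect (unless
// the branch is annulled).  A LIR_OpDelay wraps the single LIR op chosen to
// fill that slot, so exactly one machine instruction may be emitted here.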
duke@435 3098 void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
duke@435 3099 // make sure we are expecting a delay
duke@435 3100 // this has the side effect of clearing the delay state
duke@435 3101 // so we can use _masm instead of _masm->delayed() to do the
duke@435 3102 // code generation.
duke@435 3103 __ delayed();
duke@435 3104
duke@435 3105 // make sure we only emit one instruction
duke@435 3106 int offset = code_offset();
duke@435 3107 op->delay_op()->emit_code(this);
duke@435 3108 #ifdef ASSERT
duke@435 3109 if (code_offset() - offset != NativeInstruction::nop_instruction_size) {
duke@435 3110 op->delay_op()->print();
duke@435 3111 }
duke@435 3112 assert(code_offset() - offset == NativeInstruction::nop_instruction_size,
duke@435 3113 "only one instruction can go in a delay slot");
duke@435 3114 #endif
duke@435 3115
duke@435 3116 // we may also be emitting the call info for the instruction
duke@435 3117 // which we are the delay slot of.
twisti@1919 3118 CodeEmitInfo* call_info = op->call_info();
duke@435 3119 if (call_info) {
duke@435 3120 add_call_info(code_offset(), call_info);
duke@435 3121 }
duke@435 3122
duke@435 3123 if (VerifyStackAtCalls) {
duke@435 3124 _masm->sub(FP, SP, O7);
duke@435 3125 _masm->cmp(O7, initial_frame_size_in_bytes());
duke@435 3126 _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2 );
duke@435 3127 }
duke@435 3128 }
duke@435 3129
duke@435 3130
duke@435 3131 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
duke@435 3132 assert(left->is_register(), "can only handle registers");
duke@435 3133
duke@435 3134 if (left->is_single_cpu()) {
duke@435 3135 __ neg(left->as_register(), dest->as_register());
duke@435 3136 } else if (left->is_single_fpu()) {
duke@435 3137 __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg());
duke@435 3138 } else if (left->is_double_fpu()) {
duke@435 3139 __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg());
duke@435 3140 } else {
duke@435 3141 assert (left->is_double_cpu(), "Must be a long");
duke@435 3142 Register Rlow = left->as_register_lo();
duke@435 3143 Register Rhi = left->as_register_hi();
duke@435 3144 #ifdef _LP64
duke@435 3145 __ sub(G0, Rlow, dest->as_register_lo());
duke@435 3146 #else
duke@435 3147 __ subcc(G0, Rlow, dest->as_register_lo());
duke@435 3148 __ subc (G0, Rhi, dest->as_register_hi());
duke@435 3149 #endif
duke@435 3150 }
duke@435 3151 }
duke@435 3152
duke@435 3153
duke@435 3154 void LIR_Assembler::fxch(int i) {
duke@435 3155 Unimplemented();
duke@435 3156 }
duke@435 3157
duke@435 3158 void LIR_Assembler::fld(int i) {
duke@435 3159 Unimplemented();
duke@435 3160 }
duke@435 3161
duke@435 3162 void LIR_Assembler::ffree(int i) {
duke@435 3163 Unimplemented();
duke@435 3164 }
duke@435 3165
duke@435 3166 void LIR_Assembler::rt_call(LIR_Opr result, address dest,
duke@435 3167 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
duke@435 3168
duke@435 3169 // if tmp is invalid, then the function being called doesn't destroy the thread
duke@435 3170 if (tmp->is_valid()) {
duke@435 3171 __ save_thread(tmp->as_register());
duke@435 3172 }
duke@435 3173 __ call(dest, relocInfo::runtime_call_type);
duke@435 3174 __ delayed()->nop();
duke@435 3175 if (info != NULL) {
duke@435 3176 add_call_info_here(info);
duke@435 3177 }
duke@435 3178 if (tmp->is_valid()) {
duke@435 3179 __ restore_thread(tmp->as_register());
duke@435 3180 }
duke@435 3181
duke@435 3182 #ifdef ASSERT
duke@435 3183 __ verify_thread();
duke@435 3184 #endif // ASSERT
duke@435 3185 }
duke@435 3186
duke@435 3187
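// Volatile moves.  Only 64-bit longs need special treatment on 32-bit SPARC:
// a volatile long must be read or written with a single atomic 64-bit access
// (ldx/stx on V9, ldd/std otherwise), so the two 32-bit register halves are
// packed into G4/G5 around the memory access.  All other types fall through
// to the ordinary move_op.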
duke@435 3188 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
duke@435 3189 #ifdef _LP64
duke@435 3190 ShouldNotReachHere();
duke@435 3191 #endif
duke@435 3192
duke@435 3193 NEEDS_CLEANUP;
duke@435 3194 if (type == T_LONG) {
duke@435 3195 LIR_Address* mem_addr = dest->is_address() ? dest->as_address_ptr() : src->as_address_ptr();
duke@435 3196
duke@435 3197 // (extended to allow indexed as well as constant displaced for JSR-166)
duke@435 3198 Register idx = noreg; // contains either constant offset or index
duke@435 3199
duke@435 3200 int disp = mem_addr->disp();
duke@435 3201 if (mem_addr->index() == LIR_OprFact::illegalOpr) {
duke@435 3202 if (!Assembler::is_simm13(disp)) {
duke@435 3203 idx = O7;
duke@435 3204 __ set(disp, idx);
duke@435 3205 }
duke@435 3206 } else {
duke@435 3207 assert(disp == 0, "not both indexed and disp");
duke@435 3208 idx = mem_addr->index()->as_register();
duke@435 3209 }
duke@435 3210
duke@435 3211 int null_check_offset = -1;
duke@435 3212
duke@435 3213 Register base = mem_addr->base()->as_register();
duke@435 3214 if (src->is_register() && dest->is_address()) {
duke@435 3215 // G4 is high half, G5 is low half
duke@435 3216 if (VM_Version::v9_instructions_work()) {
duke@435 3217 // clear the top bits of G5, and scale up G4
duke@435 3218 __ srl (src->as_register_lo(), 0, G5);
duke@435 3219 __ sllx(src->as_register_hi(), 32, G4);
duke@435 3220 // combine the two halves into the 64 bits of G4
duke@435 3221 __ or3(G4, G5, G4);
duke@435 3222 null_check_offset = __ offset();
duke@435 3223 if (idx == noreg) {
duke@435 3224 __ stx(G4, base, disp);
duke@435 3225 } else {
duke@435 3226 __ stx(G4, base, idx);
duke@435 3227 }
duke@435 3228 } else {
duke@435 3229 __ mov (src->as_register_hi(), G4);
duke@435 3230 __ mov (src->as_register_lo(), G5);
duke@435 3231 null_check_offset = __ offset();
duke@435 3232 if (idx == noreg) {
duke@435 3233 __ std(G4, base, disp);
duke@435 3234 } else {
duke@435 3235 __ std(G4, base, idx);
duke@435 3236 }
duke@435 3237 }
duke@435 3238 } else if (src->is_address() && dest->is_register()) {
duke@435 3239 null_check_offset = __ offset();
duke@435 3240 if (VM_Version::v9_instructions_work()) {
duke@435 3241 if (idx == noreg) {
duke@435 3242 __ ldx(base, disp, G5);
duke@435 3243 } else {
duke@435 3244 __ ldx(base, idx, G5);
duke@435 3245 }
duke@435 3246 __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
duke@435 3247 __ mov (G5, dest->as_register_lo()); // copy low half into lo
duke@435 3248 } else {
duke@435 3249 if (idx == noreg) {
duke@435 3250 __ ldd(base, disp, G4);
duke@435 3251 } else {
duke@435 3252 __ ldd(base, idx, G4);
duke@435 3253 }
duke@435 3254 // G4 is high half, G5 is low half
duke@435 3255 __ mov (G4, dest->as_register_hi());
duke@435 3256 __ mov (G5, dest->as_register_lo());
duke@435 3257 }
duke@435 3258 } else {
duke@435 3259 Unimplemented();
duke@435 3260 }
duke@435 3261 if (info != NULL) {
duke@435 3262 add_debug_info_for_null_check(null_check_offset, info);
duke@435 3263 }
duke@435 3264
duke@435 3265 } else {
duke@435 3266 // use normal move for all other volatiles since they don't need
duke@435 3267 // special handling to remain atomic.
iveresov@2344 3268 move_op(src, dest, type, lir_patch_none, info, false, false, false);
duke@435 3269 }
duke@435 3270 }
duke@435 3271
duke@435 3272 void LIR_Assembler::membar() {
duke@435 3273 // only StoreLoad membars are ever explicitly needed on SPARC in TSO mode
duke@435 3274 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
duke@435 3275 }
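// Under SPARC TSO the hardware already preserves load->load, load->store and
// store->store ordering; only a store followed by a load of a different
// location may be reordered, which is why the barriers below other than
// StoreLoad are no-ops.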
duke@435 3276
duke@435 3277 void LIR_Assembler::membar_acquire() {
duke@435 3278 // no-op on TSO
duke@435 3279 }
duke@435 3280
duke@435 3281 void LIR_Assembler::membar_release() {
duke@435 3282 // no-op on TSO
duke@435 3283 }
duke@435 3284
jiangli@3592 3285 void LIR_Assembler::membar_loadload() {
jiangli@3592 3286 // no-op
jiangli@3592 3287 //__ membar(Assembler::Membar_mask_bits(Assembler::LoadLoad));
jiangli@3592 3288 }
jiangli@3592 3289
jiangli@3592 3290 void LIR_Assembler::membar_storestore() {
jiangli@3592 3291 // no-op
jiangli@3592 3292 //__ membar(Assembler::Membar_mask_bits(Assembler::StoreStore));
jiangli@3592 3293 }
jiangli@3592 3294
jiangli@3592 3295 void LIR_Assembler::membar_loadstore() {
jiangli@3592 3296 // no-op
jiangli@3592 3297 //__ membar(Assembler::Membar_mask_bits(Assembler::LoadStore));
jiangli@3592 3298 }
jiangli@3592 3299
jiangli@3592 3300 void LIR_Assembler::membar_storeload() {
jiangli@3592 3301 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
jiangli@3592 3302 }
jiangli@3592 3303
jiangli@3592 3304
iveresov@2138 3305 // Pack two sequential registers containing 32 bit values
duke@435 3306 // into a single 64 bit register.
iveresov@2138 3307 // src and src->successor() are packed into dst
iveresov@2138 3308 // src and dst may be the same register.
iveresov@2138 3309 // Note: src is destroyed
iveresov@2138 3310 void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) {
iveresov@2138 3311 Register rs = src->as_register();
iveresov@2138 3312 Register rd = dst->as_register_lo();
duke@435 3313 __ sllx(rs, 32, rs);
duke@435 3314 __ srl(rs->successor(), 0, rs->successor());
duke@435 3315 __ or3(rs, rs->successor(), rd);
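// e.g. rs:rs->successor() == 0x00000001:0x00000002  =>  rd == 0x0000000100000002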
duke@435 3316 }
duke@435 3317
iveresov@2138 3318 // Unpack a 64 bit value in a register into
duke@435 3319 // two sequential registers.
iveresov@2138 3320 // src is unpacked into dst and dst->successor()
iveresov@2138 3321 void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) {
iveresov@2138 3322 Register rs = src->as_register_lo();
iveresov@2138 3323 Register rd = dst->as_register_hi();
iveresov@2138 3324 assert_different_registers(rs, rd, rd->successor());
iveresov@2138 3325 __ srlx(rs, 32, rd);
iveresov@2138 3326 __ srl (rs, 0, rd->successor());
duke@435 3327 }
duke@435 3328
duke@435 3329
duke@435 3330 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
duke@435 3331 LIR_Address* addr = addr_opr->as_address_ptr();
duke@435 3332 assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet");
iveresov@2138 3333
iveresov@2138 3334 __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register());
duke@435 3335 }
duke@435 3336
duke@435 3337
duke@435 3338 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
duke@435 3339 assert(result_reg->is_register(), "check");
duke@435 3340 __ mov(G2_thread, result_reg->as_register());
duke@435 3341 }
duke@435 3342
duke@435 3343
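// Peephole pass over the LIR: for branches and calls, try to hoist the
// preceding single-instruction LIR op into the delay slot (LIRFillDelaySlots);
// when no suitable instruction is found, an explicit nop LIR_OpDelay is
// inserted so the delay slot is still accounted for during code emission.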
duke@435 3344 void LIR_Assembler::peephole(LIR_List* lir) {
duke@435 3345 LIR_OpList* inst = lir->instructions_list();
duke@435 3346 for (int i = 0; i < inst->length(); i++) {
duke@435 3347 LIR_Op* op = inst->at(i);
duke@435 3348 switch (op->code()) {
duke@435 3349 case lir_cond_float_branch:
duke@435 3350 case lir_branch: {
duke@435 3351 LIR_OpBranch* branch = op->as_OpBranch();
duke@435 3352 assert(branch->info() == NULL, "shouldn't be state on branches anymore");
duke@435 3353 LIR_Op* delay_op = NULL;
duke@435 3354 // we'd like to be able to pull following instructions into
duke@435 3355 // this slot but we don't know enough to do it safely yet so
duke@435 3356 // only optimize block to block control flow.
duke@435 3357 if (LIRFillDelaySlots && branch->block()) {
duke@435 3358 LIR_Op* prev = inst->at(i - 1);
duke@435 3359 if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) {
duke@435 3360 // swap previous instruction into delay slot
duke@435 3361 inst->at_put(i - 1, op);
duke@435 3362 inst->at_put(i, new LIR_OpDelay(prev, op->info()));
duke@435 3363 #ifndef PRODUCT
duke@435 3364 if (LIRTracePeephole) {
duke@435 3365 tty->print_cr("delayed");
duke@435 3366 inst->at(i - 1)->print();
duke@435 3367 inst->at(i)->print();
twisti@1919 3368 tty->cr();
duke@435 3369 }
duke@435 3370 #endif
duke@435 3371 continue;
duke@435 3372 }
duke@435 3373 }
duke@435 3374
duke@435 3375 if (!delay_op) {
duke@435 3376 delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL);
duke@435 3377 }
duke@435 3378 inst->insert_before(i + 1, delay_op);
duke@435 3379 break;
duke@435 3380 }
duke@435 3381 case lir_static_call:
duke@435 3382 case lir_virtual_call:
duke@435 3383 case lir_icvirtual_call:
twisti@1919 3384 case lir_optvirtual_call:
twisti@1919 3385 case lir_dynamic_call: {
duke@435 3386 LIR_Op* prev = inst->at(i - 1);
duke@435 3387 if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
duke@435 3388 (op->code() != lir_virtual_call ||
duke@435 3389 !prev->result_opr()->is_single_cpu() ||
duke@435 3390 prev->result_opr()->as_register() != O0) &&
duke@435 3391 LIR_Assembler::is_single_instruction(prev)) {
duke@435 3392 // Only moves without info can be put into the delay slot.
duke@435 3393 // Also don't allow the setup of the receiver in the delay
duke@435 3394 // slot for vtable calls.
duke@435 3395 inst->at_put(i - 1, op);
duke@435 3396 inst->at_put(i, new LIR_OpDelay(prev, op->info()));
duke@435 3397 #ifndef PRODUCT
duke@435 3398 if (LIRTracePeephole) {
duke@435 3399 tty->print_cr("delayed");
duke@435 3400 inst->at(i - 1)->print();
duke@435 3401 inst->at(i)->print();
twisti@1919 3402 tty->cr();
duke@435 3403 }
duke@435 3404 #endif
iveresov@2138 3405 } else {
iveresov@2138 3406 LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
iveresov@2138 3407 inst->insert_before(i + 1, delay_op);
iveresov@2138 3408 i++;
duke@435 3409 }
duke@435 3410
iveresov@2138 3411 #if defined(TIERED) && !defined(_LP64)
iveresov@2138 3412 // fixup the return value from G1 to O0/O1 for long returns.
iveresov@2138 3413 // It's done here instead of in LIRGenerator because there's
iveresov@2138 3414 // such a mismatch between the single reg and double reg
iveresov@2138 3415 // calling convention.
iveresov@2138 3416 LIR_OpJavaCall* callop = op->as_OpJavaCall();
iveresov@2138 3417 if (callop->result_opr() == FrameMap::out_long_opr) {
iveresov@2138 3418 LIR_OpJavaCall* call;
iveresov@2138 3419 LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
iveresov@2138 3420 for (int a = 0; a < callop->arguments()->length(); a++) {
iveresov@2138 3421 arguments->append(callop->arguments()->at(a)); // copy the original argument list
iveresov@2138 3422 }
iveresov@2138 3423 if (op->code() == lir_virtual_call) {
iveresov@2138 3424 call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
iveresov@2138 3425 callop->vtable_offset(), arguments, callop->info());
iveresov@2138 3426 } else {
iveresov@2138 3427 call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
iveresov@2138 3428 callop->addr(), arguments, callop->info());
iveresov@2138 3429 }
iveresov@2138 3430 inst->at_put(i - 1, call);
iveresov@2138 3431 inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
iveresov@2138 3432 T_LONG, lir_patch_none, NULL));
iveresov@2138 3433 }
iveresov@2138 3434 #endif
duke@435 3435 break;
duke@435 3436 }
duke@435 3437 }
duke@435 3438 }
duke@435 3439 }
duke@435 3440
duke@435 3441
duke@435 3442
duke@435 3443
duke@435 3444 #undef __
