src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp

author:      jrose
date:        Fri, 20 Mar 2009 23:19:36 -0700
changeset:   1100:c89f86385056
parent:      1079:c517646eef23
child:       1162:6b2273dd6fa9
permissions: -rw-r--r--

6814659: separable cleanups and subroutines for 6655638
Summary: preparatory but separable changes for method handles
Reviewed-by: kvn, never

/*
 * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_c1_LIRAssembler_sparc.cpp.incl"

#define __ _masm->


//------------------------------------------------------------
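
// Most SPARC ALU instructions accept a signed 13-bit immediate (simm13), so
// only constants in that range can be folded into a single instruction (and
// hence into a branch delay slot).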
bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        jint value = constant->as_jint();
        return Assembler::is_simm13(value);
      }

      default:
        return false;
    }
  }
  return false;
}


bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
  switch (op->code()) {
    case lir_null_check:
      return true;

    case lir_add:
    case lir_ushr:
    case lir_shr:
    case lir_shl:
      // integer shifts and adds are always one instruction
      return op->result_opr()->is_single_cpu();

    case lir_move: {
      LIR_Op1* op1 = op->as_Op1();
      LIR_Opr src = op1->in_opr();
      LIR_Opr dst = op1->result_opr();

      if (src == dst) {
        NEEDS_CLEANUP;
        // this works around a problem where moves with the same src and dst
        // end up in the delay slot and then the assembler swallows the mov
        // since it has no effect and then it complains because the delay slot
        // is empty.  returning false stops the optimizer from putting this in
        // the delay slot
        return false;
      }

      // don't put moves involving oops into the delay slot since the VerifyOops code
      // will make it much larger than a single instruction.
      if (VerifyOops) {
        return false;
      }

      if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
          ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
        return false;
      }

      if (dst->is_register()) {
        if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (src->is_single_stack()) {
          return true;
        }
      }

      if (src->is_register()) {
        if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (dst->is_single_stack()) {
          return true;
        }
      }

      if (dst->is_register() &&
          ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) ||
           (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) {
        return true;
      }

      return false;
    }

    default:
      return false;
  }
  ShouldNotReachHere();
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::O0_oop_opr;
}


LIR_Opr LIR_Assembler::incomingReceiverOpr() {
  return FrameMap::I0_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::I0_opr;
}


int LIR_Assembler::initial_frame_size_in_bytes() {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// inline cache check: the inline cached class is in G5_inline_cache_reg (G5);
// we fetch the class of the receiver (O0) and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(O0, G5_inline_cache_reg);
  return offset;
}


void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
  //
  //   1. Create a new compiled activation.
  //   2. Initialize local variables in the compiled activation.  The expression stack must be empty
  //      at the osr_bci; it is not initialized.
  //   3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes());

  // The OSR buffer is
  //
  //   locals[nlocals-1..0]
  //   monitors[number_of_locks-1..0]
  //
  // locals is a direct copy of the interpreter frame, so the first slot in
  // the local array of the osr buffer is the last local from the interpreter
  // and the last slot is local[0] (the receiver) from the interpreter.
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, and the nth lock slot in the osr buffer is the
  // 0th lock in the interpreter frame (the method lock if it is a synchronized
  // method).

  // Initialize monitors in the compiled activation.
  //   I0: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * BasicObjectLock::size()) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ld_ptr(Address(OSR_buf, 0, slot_offset + BasicObjectLock::obj_offset_in_bytes()), O7);
        __ cmp(G0, O7);
        __ br(Assembler::notEqual, false, Assembler::pt, L);
        __ delayed()->nop();
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      __ ld_ptr(Address(OSR_buf, 0, slot_offset + BasicObjectLock::lock_offset_in_bytes()), O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
      __ ld_ptr(Address(OSR_buf, 0, slot_offset + BasicObjectLock::obj_offset_in_bytes()), O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
    }
  }
}


// Optimized Library calls
// This is the fast version of java.lang.String.compare; it has no OSR entry
// and therefore we generate a slow version for OSRs.
void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) {
  Register str0 = left->as_register();
  Register str1 = right->as_register();

  Label Ldone;

  Register result = dst->as_register();
  {
    // Get a pointer to the first character of string0 in tmp0 and get string0.count in str0
    // Get a pointer to the first character of string1 in tmp1 and get string1.count in str1
    // Also, get string0.count-string1.count in o7 and get the condition code set
    // Note: some instructions have been hoisted for better instruction scheduling

    Register tmp0 = L0;
    Register tmp1 = L1;
    Register tmp2 = L2;

    int  value_offset = java_lang_String:: value_offset_in_bytes(); // char array
    int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
    int  count_offset = java_lang_String:: count_offset_in_bytes();

    __ ld_ptr(Address(str0, 0,  value_offset), tmp0);
    __ ld(Address(str0, 0, offset_offset), tmp2);
    __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
    __ ld(Address(str0, 0, count_offset), str0);
    __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);

    // str1 may be null
    add_debug_info_for_null_check_here(info);

    __ ld_ptr(Address(str1, 0,  value_offset), tmp1);
    __ add(tmp0, tmp2, tmp0);

    __ ld(Address(str1, 0, offset_offset), tmp2);
    __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
    __ ld(Address(str1, 0, count_offset), str1);
    __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
    __ subcc(str0, str1, O7);
    __ add(tmp1, tmp2, tmp1);
  }

  {
    // Compute the minimum of the string lengths, scale it and store it in limit
    Register count0 = I0;
    Register count1 = I1;
    Register limit  = L3;

    Label Lskip;
    __ sll(count0, exact_log2(sizeof(jchar)), limit);             // string0 is shorter
    __ br(Assembler::greater, true, Assembler::pt, Lskip);
    __ delayed()->sll(count1, exact_log2(sizeof(jchar)), limit);  // string1 is shorter
    __ bind(Lskip);

    // If either string is empty (or both of them) the result is the difference in lengths
    __ cmp(limit, 0);
    __ br(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->mov(O7, result);  // result is difference in lengths
  }

  {
    // Neither string is empty
    Label Lloop;

    Register base0 = L0;
    Register base1 = L1;
    Register chr0  = I0;
    Register chr1  = I1;
    Register limit = L3;

    // Shift base0 and base1 to the end of the arrays, negate limit
    __ add(base0, limit, base0);
    __ add(base1, limit, base1);
    __ neg(limit);  // limit = -min{string0.count, string1.count}
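
    // Walking the index from -limit up to zero lets the loop's inccc both
    // advance the index and test for termination, avoiding a separate compare.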
    __ lduh(base0, limit, chr0);
    __ bind(Lloop);
    __ lduh(base1, limit, chr1);
    __ subcc(chr0, chr1, chr0);
    __ br(Assembler::notZero, false, Assembler::pn, Ldone);
    assert(chr0 == result, "result must be pre-placed");
    __ delayed()->inccc(limit, sizeof(jchar));
    __ br(Assembler::notZero, true, Assembler::pt, Lloop);
    __ delayed()->lduh(base0, limit, chr0);
  }

  // If strings are equal up to min length, return the length difference.
  __ mov(O7, result);

  // Otherwise, return the difference between the first mismatched chars.
  __ bind(Ldone);
}


// --------------------------------------------------------------------------------------------

void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {
  if (!GenerateSynchronizationCode) return;

  Register obj_reg = obj_opr->as_register();
  Register lock_reg = lock_opr->as_register();

  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // compute pointer to BasicLock
  if (mon_addr.is_simm13()) {
    __ add(reg, offset, lock_reg);
  } else {
    __ set(offset, lock_reg);
    __ add(reg, lock_reg, lock_reg);
  }
  // unlock object
  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no);
  // temporary fix: must be created after the exception handler, therefore as call stub
  _slow_case_stubs->append(slow_case);
  if (UseFastLocking) {
    // try inlined fast unlocking first, revert to slow locking if it fails
    // note: lock_reg points to the displaced header since the displaced header offset is 0!
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
  } else {
    // always do slow unlocking
    // note: the slow unlocking code could be inlined here, however if we use
    //       slow unlocking, speed doesn't matter anyway and this solution is
    //       simpler and requires less duplicated code - additionally, the
    //       slow unlocking code is the same in either case which simplifies
    //       debugging
    __ br(Assembler::always, false, Assembler::pt, *slow_case->entry());
    __ delayed()->nop();
  }
  // done
  __ bind(*slow_case->continuation());
}


void LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  ciMethod* method = compilation()->method();

  address handler_base = __ start_a_stub(exception_handler_size);

  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return;
  }
#ifdef ASSERT
  int offset = code_offset();
#endif // ASSERT
  compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset());

  if (compilation()->has_exception_handlers() || JvmtiExport::can_post_exceptions()) {
    __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
    __ delayed()->nop();
  }

  __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  debug_only(__ stop("should have gone to the caller");)
  assert(code_offset() - offset <= exception_handler_size, "overflow");

  __ end_a_stub();
}


void LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  ciMethod* method = compilation()->method();
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return;
  }
#ifdef ASSERT
  int offset = code_offset();
#endif // ASSERT
  compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());

  Address deopt_blob(G3_scratch, SharedRuntime::deopt_blob()->unpack());

  __ JUMP(deopt_blob, 0); // sethi;jmp
  __ delayed()->nop();

  assert(code_offset() - offset <= deopt_handler_size, "overflow");

  debug_only(__ stop("should have gone to the caller");)

  __ end_a_stub();
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ set(NULL_WORD, reg);
  } else {
    int oop_index = __ oop_recorder()->find_index(o);
    RelocationHolder rspec = oop_Relocation::spec(oop_index);
    __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
  }
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in the oop table to hold the oop once it's been patched
  int oop_index = __ oop_recorder()->allocate_index((jobject)NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, oop_index);

  Address addr = Address(reg, address(NULL), oop_Relocation::spec(oop_index));
  assert(addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
  // NULL will be dynamically patched later and the patched value may be large.  We must
  // therefore generate the sethi/add as placeholders.
  __ sethi(addr, true);
  __ add(addr, reg, 0);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor  = noreg;
  Register Rscratch  = op->in_opr3()->as_register();
  Register Rresult   = op->result_opr()->as_register();
  int divisor = -1;

  if (op->in_opr2()->is_register()) {
    Rdivisor = op->in_opr2()->as_register();
  } else {
    divisor = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor  != Rscratch, "");
  assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");
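
  // For signed division by a power of two, Java semantics require the
  // quotient to round toward zero.  Biasing a negative dividend by
  // (divisor - 1) before the arithmetic shift achieves this; the sra by 31
  // yields the all-ones mask (or the single sign bit for divisor == 2) used
  // to compute the bias.  For example, x / 8 becomes roughly:
  //   sra  Rdividend, 31, Rscratch
  //   and  Rscratch, 7, Rscratch
  //   add  Rdividend, Rscratch, Rscratch
  //   sra  Rscratch, 3, Rresult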
  if (Rdivisor == noreg && is_power_of_2(divisor)) {
    // convert division by a power of two into some shifts and logical operations
    if (op->code() == lir_idiv) {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ sra(Rscratch, log2_intptr(divisor), Rresult);
      return;
    } else {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ andn(Rscratch, divisor - 1, Rscratch);
      __ sub(Rdividend, Rscratch, Rresult);
      return;
    }
  }

  __ sra(Rdividend, 31, Rscratch);
  __ wry(Rscratch);
  if (!VM_Version::v9_instructions_work()) {
    // v9 doesn't require these nops
    __ nop();
    __ nop();
    __ nop();
    __ nop();
  }

  add_debug_info_for_div0_here(op->info());

  if (Rdivisor != noreg) {
    __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  } else {
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
    __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  }
  Label skip;
  __ br(Assembler::overflowSet, true, Assembler::pn, skip);
  __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
  __ bind(skip);

  if (op->code() == lir_irem) {
    if (Rdivisor != noreg) {
      __ smul(Rscratch, Rdivisor, Rscratch);
    } else {
      __ smul(Rscratch, divisor, Rscratch);
    }
    __ sub(Rdividend, Rscratch, Rresult);
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");

  if (op->cond() == lir_cond_always) {
    __ br(Assembler::always, false, Assembler::pt, *(op->label()));
  } else if (op->code() == lir_cond_float_branch) {
    assert(op->ublock() != NULL, "must have unordered successor");
    bool is_unordered = (op->ublock() == op->block());
    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:         acond = Assembler::f_equal;    break;
      case lir_cond_notEqual:      acond = Assembler::f_notEqual; break;
      case lir_cond_less:          acond = (is_unordered ? Assembler::f_unorderedOrLess          : Assembler::f_less);           break;
      case lir_cond_greater:       acond = (is_unordered ? Assembler::f_unorderedOrGreater       : Assembler::f_greater);        break;
      case lir_cond_lessEqual:     acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual   : Assembler::f_lessOrEqual);    break;
      case lir_cond_greaterEqual:  acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
      default:                     ShouldNotReachHere();
    };

    if (!VM_Version::v9_instructions_work()) {
      __ nop();
    }
    __ fb( acond, false, Assembler::pn, *(op->label()));
  } else {
    assert (op->code() == lir_branch, "just checking");

    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::equal;                break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
      case lir_cond_less:         acond = Assembler::less;                 break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
      case lir_cond_greater:      acond = Assembler::greater;              break;
      case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
      case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
      default:                    ShouldNotReachHere();
    };

    // sparc has different condition codes for testing 32-bit
    // vs. 64-bit values.  We could always test xcc if we could
    // guarantee that 32-bit loads were always sign extended, but that isn't
    // true, and since sign extension isn't free it would impose a
    // slight cost.
#ifdef _LP64
    if  (op->type() == T_INT) {
      __ br(acond, false, Assembler::pn, *(op->label()));
    } else
#endif
      __ brx(acond, false, Assembler::pn, *(op->label()));
  }
  // The peephole pass fills the delay slot
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr dst = op->result_opr();

  switch (code) {
    case Bytecodes::_i2l: {
      Register rlo  = dst->as_register_lo();
      Register rhi  = dst->as_register_hi();
      Register rval = op->in_opr()->as_register();
#ifdef _LP64
      __ sra(rval, 0, rlo);
#else
      __ mov(rval, rlo);
      __ sra(rval, BitsPerInt-1, rhi);
#endif
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_i2f: {
      bool is_double = (code == Bytecodes::_i2d);
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      if (rsrc != rdst) {
        __ fmov(FloatRegisterImpl::S, rsrc, rdst);
      }
      __ fitof(w, rdst, rdst);
      break;
    }
    case Bytecodes::_f2i: {
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      Address       addr = frame_map()->address_for_slot(dst->single_stack_ix());
      Label L;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
      if (!VM_Version::v9_instructions_work()) {
        __ nop();
      }
      __ fb(Assembler::f_unordered, true, Assembler::pn, L);
      __ delayed()->st(G0, addr); // annulled if contents of rsrc is not NaN
      __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
      // store the integer result from the float register into the stack slot
      __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
      __ bind(L);
      break;
    }
    case Bytecodes::_l2i: {
      Register rlo  = op->in_opr()->as_register_lo();
      Register rhi  = op->in_opr()->as_register_hi();
      Register rdst = dst->as_register();
#ifdef _LP64
      __ sra(rlo, 0, rdst);
#else
      __ mov(rlo, rdst);
#endif
      break;
    }
    case Bytecodes::_d2f:
    case Bytecodes::_f2d: {
      bool is_double = (code == Bytecodes::_f2d);
      assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
      LIR_Opr val = op->in_opr();
      FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
      FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      __ ftof(vw, dw, rval, rdst);
      break;
    }
    case Bytecodes::_i2s:
    case Bytecodes::_i2b: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
      __ sll (rval, shift, rdst);
      __ sra (rdst, shift, rdst);
      break;
    }
    case Bytecodes::_i2c: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte;
      __ sll (rval, shift, rdst);
      __ srl (rdst, shift, rdst);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code) {
  // do nothing since all instructions are word aligned on sparc
}


void LIR_Assembler::call(address entry, relocInfo::relocType rtype, CodeEmitInfo* info) {
  __ call(entry, rtype);
  // the peephole pass fills the delay slot
}


void LIR_Assembler::ic_call(address entry, CodeEmitInfo* info) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc());
  __ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
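  // non_oop_word() is a sentinel value that can never collide with a real
  // oop; the runtime patches the actual inline-cache holder over it when the
  // call site is resolved.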
  __ relocate(rspec);
  __ call(entry, relocInfo::none);
  // the peephole pass fills the delay slot
}


void LIR_Assembler::vtable_call(int vtable_offset, CodeEmitInfo* info) {
  add_debug_info_for_null_check_here(info);
  __ ld_ptr(Address(O0, 0, oopDesc::klass_offset_in_bytes()), G3_scratch);
  if (__ is_simm13(vtable_offset)) {
    __ ld_ptr(G3_scratch, vtable_offset, G5_method);
  } else {
    // This will generate 2 instructions
    __ set(vtable_offset, G5_method);
    // ld_ptr, set_hi, set
    __ ld_ptr(G3_scratch, G5_method, G5_method);
  }
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3_scratch);
  __ callr(G3_scratch, G0);
  // the peephole pass fills the delay slot
}


// load with 32-bit displacement
int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) {
  int load_offset = code_offset();
  if (Assembler::is_simm13(disp)) {
    if (info != NULL) add_debug_info_for_null_check_here(info);
    switch (ld_type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ ldsb(s, disp, d); break;
      case T_CHAR  : __ lduh(s, disp, d); break;
      case T_SHORT : __ ldsh(s, disp, d); break;
      case T_INT   : __ ld(s, disp, d); break;
      case T_ADDRESS:// fall through
      case T_ARRAY : // fall through
      case T_OBJECT: __ ld_ptr(s, disp, d); break;
      default      : ShouldNotReachHere();
    }
  } else {
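    // The displacement doesn't fit in a simm13, so materialize it in O7:
    // sethi fills bits 31..10 and the add supplies the low 10 bits, then the
    // access uses reg+reg addressing.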
    __ sethi(disp & ~0x3ff, O7, true);
    __ add(O7, disp & 0x3ff, O7);
    if (info != NULL) add_debug_info_for_null_check_here(info);
    load_offset = code_offset();
    switch (ld_type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ ldsb(s, O7, d); break;
      case T_CHAR  : __ lduh(s, O7, d); break;
      case T_SHORT : __ ldsh(s, O7, d); break;
      case T_INT   : __ ld(s, O7, d); break;
      case T_ADDRESS:// fall through
      case T_ARRAY : // fall through
      case T_OBJECT: __ ld_ptr(s, O7, d); break;
      default      : ShouldNotReachHere();
    }
  }
  if (ld_type == T_ARRAY || ld_type == T_OBJECT) __ verify_oop(d);
  return load_offset;
}


// store with 32-bit displacement
void LIR_Assembler::store(Register value, Register base, int offset, BasicType type, CodeEmitInfo *info) {
  if (Assembler::is_simm13(offset)) {
    if (info != NULL)  add_debug_info_for_null_check_here(info);
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(value, base, offset); break;
      case T_CHAR  : __ sth(value, base, offset); break;
      case T_SHORT : __ sth(value, base, offset); break;
      case T_INT   : __ stw(value, base, offset); break;
      case T_ADDRESS:// fall through
      case T_ARRAY : // fall through
      case T_OBJECT: __ st_ptr(value, base, offset); break;
      default      : ShouldNotReachHere();
    }
  } else {
    __ sethi(offset & ~0x3ff, O7, true);
    __ add(O7, offset & 0x3ff, O7);
    if (info != NULL) add_debug_info_for_null_check_here(info);
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(value, base, O7); break;
      case T_CHAR  : __ sth(value, base, O7); break;
      case T_SHORT : __ sth(value, base, O7); break;
      case T_INT   : __ stw(value, base, O7); break;
      case T_ADDRESS:// fall through
      case T_ARRAY : // fall through
      case T_OBJECT: __ st_ptr(value, base, O7); break;
      default      : ShouldNotReachHere();
    }
  }
  // Note: Do the store before verification as the code might be patched!
  if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(value);
}


// load float with 32-bit displacement
void LIR_Assembler::load(Register s, int disp, FloatRegister d, BasicType ld_type, CodeEmitInfo *info) {
  FloatRegisterImpl::Width w;
  switch (ld_type) {
    case T_FLOAT : w = FloatRegisterImpl::S; break;
    case T_DOUBLE: w = FloatRegisterImpl::D; break;
    default      : ShouldNotReachHere();
  }

  if (Assembler::is_simm13(disp)) {
    if (info != NULL) add_debug_info_for_null_check_here(info);
    if (disp % BytesPerLong != 0 && w == FloatRegisterImpl::D) {
      __ ldf(FloatRegisterImpl::S, s, disp + BytesPerWord, d->successor());
      __ ldf(FloatRegisterImpl::S, s, disp               , d);
    } else {
      __ ldf(w, s, disp, d);
    }
  } else {
    __ sethi(disp & ~0x3ff, O7, true);
    __ add(O7, disp & 0x3ff, O7);
    if (info != NULL) add_debug_info_for_null_check_here(info);
    __ ldf(w, s, O7, d);
  }
}


// store float with 32-bit displacement
void LIR_Assembler::store(FloatRegister value, Register base, int offset, BasicType type, CodeEmitInfo *info) {
  FloatRegisterImpl::Width w;
  switch (type) {
    case T_FLOAT : w = FloatRegisterImpl::S; break;
    case T_DOUBLE: w = FloatRegisterImpl::D; break;
    default      : ShouldNotReachHere();
  }

  if (Assembler::is_simm13(offset)) {
    if (info != NULL) add_debug_info_for_null_check_here(info);
    if (w == FloatRegisterImpl::D && offset % BytesPerLong != 0) {
      __ stf(FloatRegisterImpl::S, value->successor(), base, offset + BytesPerWord);
      __ stf(FloatRegisterImpl::S, value             , base, offset);
    } else {
      __ stf(w, value, base, offset);
    }
  } else {
    __ sethi(offset & ~0x3ff, O7, true);
    __ add(O7, offset & 0x3ff, O7);
    if (info != NULL) add_debug_info_for_null_check_here(info);
    __ stf(w, value, O7, base);
  }
}


int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool unaligned) {
  int store_offset;
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we set up the offset in O7
    __ sethi(offset & ~0x3ff, O7, true);
    __ add(O7, offset & 0x3ff, O7);
    store_offset = store(from_reg, base, O7, type);
  } else {
    if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register());
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), base, offset); break;
      case T_CHAR  : __ sth(from_reg->as_register(), base, offset); break;
      case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
      case T_INT   : __ stw(from_reg->as_register(), base, offset); break;
      case T_LONG  :
#ifdef _LP64
        if (unaligned || PatchALot) {
          __ srax(from_reg->as_register_lo(), 32, O7);
          __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
          __ stw(O7,                         base, offset + hi_word_offset_in_bytes);
        } else {
          __ stx(from_reg->as_register_lo(), base, offset);
        }
#else
        assert(Assembler::is_simm13(offset + 4), "must be");
        __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
        __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
#endif
        break;
      case T_ADDRESS:// fall through
      case T_ARRAY : // fall through
      case T_OBJECT: __ st_ptr(from_reg->as_register(), base, offset); break;
      case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
      case T_DOUBLE:
        {
          FloatRegister reg = from_reg->as_double_reg();
          // split unaligned stores
          if (unaligned || PatchALot) {
            assert(Assembler::is_simm13(offset + 4), "must be");
            __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
            __ stf(FloatRegisterImpl::S, reg,              base, offset);
          } else {
            __ stf(FloatRegisterImpl::D, reg, base, offset);
          }
          break;
        }
      default      : ShouldNotReachHere();
    }
  }
  return store_offset;
}


int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type) {
  if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register());
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stb(from_reg->as_register(), base, disp); break;
    case T_CHAR  : __ sth(from_reg->as_register(), base, disp); break;
    case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stw(from_reg->as_register(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ stx(from_reg->as_register_lo(), base, disp);
#else
      assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
      __ std(from_reg->as_register_hi(), base, disp);
#endif
      break;
    case T_ADDRESS:// fall through
    case T_ARRAY : // fall through
    case T_OBJECT: __ st_ptr(from_reg->as_register(), base, disp); break;
    case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
    default      : ShouldNotReachHere();
  }
  return store_offset;
}


int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool unaligned) {
  int load_offset;
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
    assert(base != O7, "destroying register");
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we set up the offset in O7
    __ sethi(offset & ~0x3ff, O7, true);
    __ add(O7, offset & 0x3ff, O7);
    load_offset = load(base, O7, to_reg, type);
  } else {
    load_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ ldsb(base, offset, to_reg->as_register()); break;
      case T_CHAR  : __ lduh(base, offset, to_reg->as_register()); break;
      case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break;
      case T_INT   : __ ld(base, offset, to_reg->as_register()); break;
      case T_LONG  :
        if (!unaligned) {
#ifdef _LP64
          __ ldx(base, offset, to_reg->as_register_lo());
#else
          assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
                 "must be sequential");
          __ ldd(base, offset, to_reg->as_register_hi());
#endif
        } else {
#ifdef _LP64
          assert(base != to_reg->as_register_lo(), "can't handle this");
          assert(O7 != to_reg->as_register_lo(), "can't handle this");
          __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
          __ lduw(base, offset + lo_word_offset_in_bytes, O7); // zero-extended low word
          __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
          __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
#else
          if (base == to_reg->as_register_lo()) {
            __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
            __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
          } else {
            __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
            __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
          }
#endif
        }
        break;
      case T_ADDRESS:// fall through
      case T_ARRAY : // fall through
      case T_OBJECT: __ ld_ptr(base, offset, to_reg->as_register()); break;
      case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
      case T_DOUBLE:
        {
          FloatRegister reg = to_reg->as_double_reg();
          // split unaligned loads
          if (unaligned || PatchALot) {
            __ ldf(FloatRegisterImpl::S, base, offset + BytesPerWord, reg->successor());
            __ ldf(FloatRegisterImpl::S, base, offset,                reg);
          } else {
            __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
          }
          break;
        }
      default      : ShouldNotReachHere();
    }
    if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(to_reg->as_register());
  }
  return load_offset;
}


int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type) {
  int load_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ ldsb(base, disp, to_reg->as_register()); break;
    case T_CHAR  : __ lduh(base, disp, to_reg->as_register()); break;
    case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
    case T_INT   : __ ld(base, disp, to_reg->as_register()); break;
    case T_ADDRESS:// fall through
    case T_ARRAY : // fall through
    case T_OBJECT: __ ld_ptr(base, disp, to_reg->as_register()); break;
    case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
    case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
    case T_LONG  :
#ifdef _LP64
      __ ldx(base, disp, to_reg->as_register_lo());
#else
      assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
             "must be sequential");
      __ ldd(base, disp, to_reg->as_register_hi());
#endif
      break;
    default      : ShouldNotReachHere();
  }
  if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(to_reg->as_register());
  return load_offset;
}


// load/store with an Address
void LIR_Assembler::load(const Address& a, Register d, BasicType ld_type, CodeEmitInfo *info, int offset) {
  load(a.base(), a.disp() + offset, d, ld_type, info);
}


void LIR_Assembler::store(Register value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) {
  store(value, dest.base(), dest.disp() + offset, type, info);
}


// loadf/storef with an Address
void LIR_Assembler::load(const Address& a, FloatRegister d, BasicType ld_type, CodeEmitInfo *info, int offset) {
  load(a.base(), a.disp() + offset, d, ld_type, info);
}


void LIR_Assembler::store(FloatRegister value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) {
  store(value, dest.base(), dest.disp() + offset, type, info);
}


// load/store with a LIR_Address
void LIR_Assembler::load(LIR_Address* a, Register d, BasicType ld_type, CodeEmitInfo *info) {
  load(as_Address(a), d, ld_type, info);
}


void LIR_Assembler::store(Register value, LIR_Address* dest, BasicType type, CodeEmitInfo *info) {
  store(value, as_Address(dest), type, info);
}


// loadf/storef with a LIR_Address
void LIR_Assembler::load(LIR_Address* a, FloatRegister d, BasicType ld_type, CodeEmitInfo *info) {
  load(as_Address(a), d, ld_type, info);
}


void LIR_Assembler::store(FloatRegister value, LIR_Address* dest, BasicType type, CodeEmitInfo *info) {
  store(value, as_Address(dest), type, info);
}


void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_OBJECT: {
      Register src_reg = O7;
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());

      Register tmp = O7;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = G0;
      } else {
        __ set(value_lo, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = G0;
      } else {
        __ set(value_hi, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes);
      break;
    }
    default:
      Unimplemented();
  }
}


void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      LIR_Opr tmp = FrameMap::O7_opr;
      int value = c->as_jint_bits();
      if (value == 0) {
        tmp = FrameMap::G0_opr;
      } else if (Assembler::is_simm13(value)) {
        __ set(value, O7);
      }
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        store(tmp, base, addr->index()->as_pointer_register(), type);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        store(tmp, base, addr->disp(), type);
      }
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      assert(!addr->index()->is_valid(), "can't handle reg reg address here");
      assert(Assembler::is_simm13(addr->disp()) &&
             Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");

      Register tmp = O7;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = G0;
      } else {
        __ set(value_lo, O7);
      }
      store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = G0;
      } else {
        __ set(value_hi, O7);
      }
      store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT);
      break;
    }
    case T_OBJECT: {
      jobject obj = c->as_jobject();
      LIR_Opr tmp;
      if (obj == NULL) {
        tmp = FrameMap::G0_opr;
      } else {
        tmp = FrameMap::O7_opr;
        jobject2reg(c->as_jobject(), O7);
      }
      // handle either reg+reg or reg+disp address
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        store(tmp, base, addr->index()->as_pointer_register(), type);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        store(tmp, base, addr->disp(), type);
      }
      break;
    }
    default:
      Unimplemented();
  }
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT:
      {
        jint con = c->as_jint();
        if (to_reg->is_single_cpu()) {
          assert(patch_code == lir_patch_none, "no patching handled here");
          __ set(con, to_reg->as_register());
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_single_fpu(), "wrong register kind");

          __ set(con, O7);
          Address temp_slot(SP, 0, (frame::register_save_words * wordSize) + STACK_BIAS);
          __ st(O7, temp_slot);
          __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
        }
      }
      break;

    case T_LONG:
      {
        jlong con = c->as_jlong();

        if (to_reg->is_double_cpu()) {
#ifdef _LP64
          __ set(con,  to_reg->as_register_lo());
#else
          __ set(low(con),  to_reg->as_register_lo());
          __ set(high(con), to_reg->as_register_hi());
#endif
#ifdef _LP64
        } else if (to_reg->is_single_cpu()) {
          __ set(con, to_reg->as_register());
#endif
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_double_fpu(), "wrong register kind");
          Address temp_slot_lo(SP, 0, ((frame::register_save_words  ) * wordSize) + STACK_BIAS);
          Address temp_slot_hi(SP, 0, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
          __ set(low(con),  O7);
          __ st(O7, temp_slot_lo);
          __ set(high(con), O7);
          __ st(O7, temp_slot_hi);
          __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg());
        }
      }
      break;

    case T_OBJECT:
      {
        if (patch_code == lir_patch_none) {
          jobject2reg(c->as_jobject(), to_reg->as_register());
        } else {
          jobject2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;
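
    // Float and double constants live in the nmethod's constant area; the
    // internal_word relocation keeps the sethi/load pair pointing at them if
    // the code is ever moved.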
    case T_FLOAT:
      {
        address const_addr = __ float_constant(c->as_jfloat());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
        if (to_reg->is_single_fpu()) {
          __ sethi((intx)const_addr & ~0x3ff, O7, true, rspec);
          __ relocate(rspec);

          int offset = (intx)const_addr & 0x3ff;
          __ ldf(FloatRegisterImpl::S, O7, offset, to_reg->as_float_reg());
        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");

          __ set((intx)const_addr, O7, rspec);
          load(O7, 0, to_reg->as_register(), T_INT);
        }
      }
      break;

    case T_DOUBLE:
      {
        address const_addr = __ double_constant(c->as_jdouble());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);

        if (to_reg->is_double_fpu()) {
          __ sethi((intx)const_addr & ~0x3ff, O7, true, rspec);
          int offset = (intx)const_addr & 0x3ff;
          __ relocate(rspec);
          __ ldf(FloatRegisterImpl::D, O7, offset, to_reg->as_double_reg());
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
#ifdef _LP64
          __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
#else
          __ set(low(jlong_cast(c->as_jdouble())), to_reg->as_register_lo());
          __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
#endif
        }
      }
      break;

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Register reg = addr->base()->as_register();
  return Address(reg, 0, addr->disp());
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      break;
    }
    case T_OBJECT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ ld_ptr(from.base(), from.disp(), tmp);
      __ st_ptr(tmp, to.base(), to.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Register tmp = O7;
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      __ lduw(from.base(), from.disp() + 4, tmp);
      __ stw(tmp, to.base(), to.disp() + 4);
      break;
    }

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), 0, base.disp() + hi_word_offset_in_bytes);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), 0, base.disp() + lo_word_offset_in_bytes);
}
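

// Note: SPARC is big-endian, so the high word of a two-word value sits at
// the lower address; as_Address_hi/as_Address_lo encode that layout via
// hi_word_offset_in_bytes and lo_word_offset_in_bytes.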
  1373 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
  1374                             LIR_PatchCode patch_code, CodeEmitInfo* info, bool unaligned) {
  1376   LIR_Address* addr = src_opr->as_address_ptr();
  1377   LIR_Opr to_reg = dest;
  1379   Register src = addr->base()->as_pointer_register();
  1380   Register disp_reg = noreg;
  1381   int disp_value = addr->disp();
  1382   bool needs_patching = (patch_code != lir_patch_none);
  1384   if (addr->base()->type() == T_OBJECT) {
  1385     __ verify_oop(src);
  1388   PatchingStub* patch = NULL;
  1389   if (needs_patching) {
  1390     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  1391     assert(!to_reg->is_double_cpu() ||
  1392            patch_code == lir_patch_none ||
  1393            patch_code == lir_patch_normal, "patching doesn't match register");
  1394   }
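         // note: SPARC memory operands are either reg+simm13 or reg+reg.  A
         // displacement that does not fit in 13 signed bits is first built in
         // O7 (for patched accesses as a sethi/add pair that the patching code
         // rewrites later); otherwise the index register is used directly.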
  1396   if (addr->index()->is_illegal()) {
  1397     if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
  1398       if (needs_patching) {
  1399         __ sethi(0, O7, true);
  1400         __ add(O7, 0, O7);
  1401       } else {
  1402         __ set(disp_value, O7);
  1403       }
  1404       disp_reg = O7;
  1405     }
  1406   } else if (unaligned || PatchALot) {
  1407     __ add(src, addr->index()->as_register(), O7);
  1408     src = O7;
  1409   } else {
  1410     disp_reg = addr->index()->as_pointer_register();
  1411     assert(disp_value == 0, "can't handle 3 operand addresses");
  1412   }
  1414   // remember the offset of the load.  The patching_epilog must be done
  1415   // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  1416   // entered in increasing order.
  1417   int offset = code_offset();
  1419   assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
  1420   if (disp_reg == noreg) {
  1421     offset = load(src, disp_value, to_reg, type, unaligned);
  1422   } else {
  1423     assert(!unaligned, "can't handle this");
  1424     offset = load(src, disp_reg, to_reg, type);
  1425   }
  1427   if (patch != NULL) {
  1428     patching_epilog(patch, patch_code, src, info);
  1429   }
  1431   if (info != NULL) add_debug_info_for_null_check(offset, info);
  1432 }
  1435 void LIR_Assembler::prefetchr(LIR_Opr src) {
  1436   LIR_Address* addr = src->as_address_ptr();
  1437   Address from_addr = as_Address(addr);
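         // note: prefetch is a V9 instruction, so pre-V9 parts simply omit the
         // hint; that is safe because prefetching is purely advisory.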
  1439   if (VM_Version::has_v9()) {
  1440     __ prefetch(from_addr, Assembler::severalReads);
  1441   }
  1442 }
  1445 void LIR_Assembler::prefetchw(LIR_Opr src) {
  1446   LIR_Address* addr = src->as_address_ptr();
  1447   Address from_addr = as_Address(addr);
  1449   if (VM_Version::has_v9()) {
  1450     __ prefetch(from_addr, Assembler::severalWritesAndPossiblyReads);
  1451   }
  1452 }
  1455 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  1456   Address addr;
  1457   if (src->is_single_word()) {
  1458     addr = frame_map()->address_for_slot(src->single_stack_ix());
  1459   } else if (src->is_double_word())  {
  1460     addr = frame_map()->address_for_double_slot(src->double_stack_ix());
  1461   }
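         // note: on 64-bit SPARC the frame pointer is biased by STACK_BIAS;
         // undoing the bias yields the real address, and a doubleword slot
         // that is not 8-byte aligned must take the unaligned two-word path.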
  1463   bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  1464   load(addr.base(), addr.disp(), dest, dest->type(), unaligned);
  1465 }
  1468 void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  1469   Address addr;
  1470   if (dest->is_single_word()) {
  1471     addr = frame_map()->address_for_slot(dest->single_stack_ix());
  1472   } else if (dest->is_double_word())  {
  1473     addr = frame_map()->address_for_slot(dest->double_stack_ix());
  1474   }
  1475   bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  1476   store(from_reg, addr.base(), addr.disp(), from_reg->type(), unaligned);
  1477 }
  1480 void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  1481   if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
  1482     if (from_reg->is_double_fpu()) {
  1483       // double to double moves
  1484       assert(to_reg->is_double_fpu(), "should match");
  1485       __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg());
  1486     } else {
  1487       // float to float moves
  1488       assert(to_reg->is_single_fpu(), "should match");
  1489       __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg());
  1490     }
  1491   } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
  1492     if (from_reg->is_double_cpu()) {
  1493 #ifdef _LP64
  1494       __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
  1495 #else
  1496       assert(to_reg->is_double_cpu() &&
  1497              from_reg->as_register_hi() != to_reg->as_register_lo() &&
  1498              from_reg->as_register_lo() != to_reg->as_register_hi(),
  1499              "should both be long and not overlap");
  1500       // long to long moves
  1501       __ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
  1502       __ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
  1503 #endif
  1504 #ifdef _LP64
  1505     } else if (to_reg->is_double_cpu()) {
  1506       // int to long moves
  1507       __ mov(from_reg->as_register(), to_reg->as_register_lo());
  1508 #endif
  1509     } else {
  1510       // int to int moves
  1511       __ mov(from_reg->as_register(), to_reg->as_register());
  1512     }
  1513   } else {
  1514     ShouldNotReachHere();
  1515   }
  1516   if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
  1517     __ verify_oop(to_reg->as_register());
  1518   }
  1519 }
  1522 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
  1523                             LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
  1524                             bool unaligned) {
  1525   LIR_Address* addr = dest->as_address_ptr();
  1527   Register src = addr->base()->as_pointer_register();
  1528   Register disp_reg = noreg;
  1529   int disp_value = addr->disp();
  1530   bool needs_patching = (patch_code != lir_patch_none);
  1532   if (addr->base()->is_oop_register()) {
  1533     __ verify_oop(src);
  1534   }
  1536   PatchingStub* patch = NULL;
  1537   if (needs_patching) {
  1538     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  1539     assert(!from_reg->is_double_cpu() ||
  1540            patch_code == lir_patch_none ||
  1541            patch_code == lir_patch_normal, "patching doesn't match register");
  1542   }
  1544   if (addr->index()->is_illegal()) {
  1545     if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
  1546       if (needs_patching) {
  1547         __ sethi(0, O7, true);
  1548         __ add(O7, 0, O7);
  1549       } else {
  1550         __ set(disp_value, O7);
  1551       }
  1552       disp_reg = O7;
  1553     }
  1554   } else if (unaligned || PatchALot) {
  1555     __ add(src, addr->index()->as_register(), O7);
  1556     src = O7;
  1557   } else {
  1558     disp_reg = addr->index()->as_pointer_register();
  1559     assert(disp_value == 0, "can't handle 3 operand addresses");
  1560   }
  1562   // remember the offset of the store.  The patching_epilog must be done
  1563   // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  1564   // entered in increasing order.
  1565   int offset;
  1567   assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
  1568   if (disp_reg == noreg) {
  1569     offset = store(from_reg, src, disp_value, type, unaligned);
  1570   } else {
  1571     assert(!unaligned, "can't handle this");
  1572     offset = store(from_reg, src, disp_reg, type);
  1573   }
  1575   if (patch != NULL) {
  1576     patching_epilog(patch, patch_code, src, info);
  1577   }
  1579   if (info != NULL) add_debug_info_for_null_check(offset, info);
  1580 }
  1583 void LIR_Assembler::return_op(LIR_Opr result) {
  1584   // the poll may need a register so just pick one that isn't the return register
  1585 #ifdef TIERED
  1586   if (result->type_field() == LIR_OprDesc::long_type) {
  1587     // Must move the result to G1
  1588     // Must leave proper result in O0,O1 and G1 (TIERED only)
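           // note: I0/I1 become O0/O1 after the restore below; G1 additionally
           // receives the two halves packed into one 64-bit value, so callers
           // using either convention find the long result where they expect it.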
  1589     __ sllx(I0, 32, G1);          // Shift bits into high G1
  1590     __ srl (I1, 0, I1);           // Zero extend I1, the low half (O1 after the restore)
  1591     __ or3 (I1, G1, G1);          // OR 64 bits into G1
  1592   }
  1593 #endif // TIERED
  1594   __ set((intptr_t)os::get_polling_page(), L0);
  1595   __ relocate(relocInfo::poll_return_type);
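         // note: the ld_ptr below discards its result into G0; the load exists
         // only to trap when the VM protects the polling page at a safepoint.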
  1596   __ ld_ptr(L0, 0, G0);
  1597   __ ret();
  1598   __ delayed()->restore();
  1599 }
  1602 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  1603   __ set((intptr_t)os::get_polling_page(), tmp->as_register());
  1604   if (info != NULL) {
  1605     add_debug_info_for_branch(info);
  1606   } else {
  1607     __ relocate(relocInfo::poll_type);
  1608   }
  1610   int offset = __ offset();
  1611   __ ld_ptr(tmp->as_register(), 0, G0);
  1613   return offset;
  1614 }
  1617 void LIR_Assembler::emit_static_call_stub() {
  1618   address call_pc = __ pc();
  1619   address stub = __ start_a_stub(call_stub_size);
  1620   if (stub == NULL) {
  1621     bailout("static call stub overflow");
  1622     return;
  1623   }
  1625   int start = __ offset();
  1626   __ relocate(static_stub_Relocation::spec(call_pc));
  1628   __ set_oop(NULL, G5);
  1629   // must be set to -1 at code generation time
  1630   Address a(G3, (address)-1);
  1631   __ jump_to(a, 0);
  1632   __ delayed()->nop();
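         // note: the NULL oop set above and the -1 branch target are
         // placeholders that are patched with the real methodOop and entry
         // point when the call site is resolved; the stub is never executed
         // in this unpatched form.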
  1634   assert(__ offset() - start <= call_stub_size, "stub too big");
  1635   __ end_a_stub();
  1636 }
  1639 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  1640   if (opr1->is_single_fpu()) {
  1641     __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg());
  1642   } else if (opr1->is_double_fpu()) {
  1643     __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg());
  1644   } else if (opr1->is_single_cpu()) {
  1645     if (opr2->is_constant()) {
  1646       switch (opr2->as_constant_ptr()->type()) {
  1647         case T_INT:
  1648           { jint con = opr2->as_constant_ptr()->as_jint();
  1649             if (Assembler::is_simm13(con)) {
  1650               __ cmp(opr1->as_register(), con);
  1651             } else {
  1652               __ set(con, O7);
  1653               __ cmp(opr1->as_register(), O7);
  1654             }
  1655           }
  1656           break;
  1658         case T_OBJECT:
  1659           // there are only equal/notequal comparisons on objects
  1660           { jobject con = opr2->as_constant_ptr()->as_jobject();
  1661             if (con == NULL) {
  1662               __ cmp(opr1->as_register(), 0);
  1663             } else {
  1664               jobject2reg(con, O7);
  1665               __ cmp(opr1->as_register(), O7);
  1666             }
  1667           }
  1668           break;
  1670         default:
  1671           ShouldNotReachHere();
  1672           break;
  1673       }
  1674     } else {
  1675       if (opr2->is_address()) {
  1676         LIR_Address * addr = opr2->as_address_ptr();
  1677         BasicType type = addr->type();
  1678         if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
  1679         else                    __ ld(as_Address(addr), O7);
  1680         __ cmp(opr1->as_register(), O7);
  1681       } else {
  1682         __ cmp(opr1->as_register(), opr2->as_register());
  1683       }
  1684     }
  1685   } else if (opr1->is_double_cpu()) {
  1686     Register xlo = opr1->as_register_lo();
  1687     Register xhi = opr1->as_register_hi();
  1688     if (opr2->is_constant() && opr2->as_jlong() == 0) {
  1689       assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
  1690 #ifdef _LP64
  1691       __ orcc(xhi, G0, G0);
  1692 #else
  1693       __ orcc(xhi, xlo, G0);
  1694 #endif
  1695     } else if (opr2->is_register()) {
  1696       Register ylo = opr2->as_register_lo();
  1697       Register yhi = opr2->as_register_hi();
  1698 #ifdef _LP64
  1699       __ cmp(xlo, ylo);
  1700 #else
  1701       __ subcc(xlo, ylo, xlo);
  1702       __ subccc(xhi, yhi, xhi);
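             // note: the subtract-with-carry pair sets the condition codes for
             // the full 64-bit difference (clobbering xlo/xhi); for equality
             // tests the halves are ORed as well so Z covers all 64 bits.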
  1703       if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
  1704         __ orcc(xhi, xlo, G0);
  1705       }
  1706 #endif
  1707     } else {
  1708       ShouldNotReachHere();
  1709     }
  1710   } else if (opr1->is_address()) {
  1711     LIR_Address * addr = opr1->as_address_ptr();
  1712     BasicType type = addr->type();
  1713     assert (opr2->is_constant(), "Checking");
  1714     if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
  1715     else                    __ ld(as_Address(addr), O7);
  1716     __ cmp(O7, opr2->as_constant_ptr()->as_jint());
  1717   } else {
  1718     ShouldNotReachHere();
  1719   }
  1720 }
  1723 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
  1724   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
  1725     bool is_unordered_less = (code == lir_ucmp_fd2i);
  1726     if (left->is_single_fpu()) {
  1727       __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
  1728     } else if (left->is_double_fpu()) {
  1729       __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
  1730     } else {
  1731       ShouldNotReachHere();
  1732     }
  1733   } else if (code == lir_cmp_l2i) {
  1734     __ lcmp(left->as_register_hi(),  left->as_register_lo(),
  1735             right->as_register_hi(), right->as_register_lo(),
  1736             dst->as_register());
  1737   } else {
  1738     ShouldNotReachHere();
  1739   }
  1740 }
  1743 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) {
  1745   Assembler::Condition acond;
  1746   switch (condition) {
  1747     case lir_cond_equal:        acond = Assembler::equal;        break;
  1748     case lir_cond_notEqual:     acond = Assembler::notEqual;     break;
  1749     case lir_cond_less:         acond = Assembler::less;         break;
  1750     case lir_cond_lessEqual:    acond = Assembler::lessEqual;    break;
  1751     case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
  1752     case lir_cond_greater:      acond = Assembler::greater;      break;
  1753     case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned;      break;
  1754     case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;      break;
  1755     default:                         ShouldNotReachHere();
  1756   };
  1758   if (opr1->is_constant() && opr1->type() == T_INT) {
  1759     Register dest = result->as_register();
  1760     // load up first part of constant before branch
  1761     // and do the rest in the delay slot.
  1762     if (!Assembler::is_simm13(opr1->as_jint())) {
  1763       __ sethi(opr1->as_jint(), dest);
  1764     }
  1765   } else if (opr1->is_constant()) {
  1766     const2reg(opr1, result, lir_patch_none, NULL);
  1767   } else if (opr1->is_register()) {
  1768     reg2reg(opr1, result);
  1769   } else if (opr1->is_stack()) {
  1770     stack2reg(opr1, result, result->type());
  1771   } else {
  1772     ShouldNotReachHere();
  1773   }
  1774   Label skip;
  1775   __ br(acond, false, Assembler::pt, skip);
  1776   if (opr1->is_constant() && opr1->type() == T_INT) {
  1777     Register dest = result->as_register();
  1778     if (Assembler::is_simm13(opr1->as_jint())) {
  1779       __ delayed()->or3(G0, opr1->as_jint(), dest);
  1780     } else {
  1781       // the sethi has been done above, so just put in the low 10 bits
  1782       __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest);
  1783     }
  1784   } else {
  1785     // can't do anything useful in the delay slot
  1786     __ delayed()->nop();
  1787   }
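         // note: execution falls through to load the "false" value (opr2); a
         // taken branch skips it, keeping the "true" value loaded above.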
  1788   if (opr2->is_constant()) {
  1789     const2reg(opr2, result, lir_patch_none, NULL);
  1790   } else if (opr2->is_register()) {
  1791     reg2reg(opr2, result);
  1792   } else if (opr2->is_stack()) {
  1793     stack2reg(opr2, result, result->type());
  1794   } else {
  1795     ShouldNotReachHere();
  1796   }
  1797   __ bind(skip);
  1798 }
  1801 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  1802   assert(info == NULL, "unused on this code path");
  1803   assert(left->is_register(), "wrong items state");
  1804   assert(dest->is_register(), "wrong items state");
  1806   if (right->is_register()) {
  1807     if (dest->is_float_kind()) {
  1809       FloatRegister lreg, rreg, res;
  1810       FloatRegisterImpl::Width w;
  1811       if (right->is_single_fpu()) {
  1812         w = FloatRegisterImpl::S;
  1813         lreg = left->as_float_reg();
  1814         rreg = right->as_float_reg();
  1815         res  = dest->as_float_reg();
  1816       } else {
  1817         w = FloatRegisterImpl::D;
  1818         lreg = left->as_double_reg();
  1819         rreg = right->as_double_reg();
  1820         res  = dest->as_double_reg();
  1821       }
  1823       switch (code) {
  1824         case lir_add: __ fadd(w, lreg, rreg, res); break;
  1825         case lir_sub: __ fsub(w, lreg, rreg, res); break;
  1826         case lir_mul: // fall through
  1827         case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break;
  1828         case lir_div: // fall through
  1829         case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break;
  1830         default: ShouldNotReachHere();
  1831       }
  1833     } else if (dest->is_double_cpu()) {
  1834 #ifdef _LP64
  1835       Register dst_lo = dest->as_register_lo();
  1836       Register op1_lo = left->as_pointer_register();
  1837       Register op2_lo = right->as_pointer_register();
  1839       switch (code) {
  1840         case lir_add:
  1841           __ add(op1_lo, op2_lo, dst_lo);
  1842           break;
  1844         case lir_sub:
  1845           __ sub(op1_lo, op2_lo, dst_lo);
  1846           break;
  1848         default: ShouldNotReachHere();
  1849       }
  1850 #else
  1851       Register op1_lo = left->as_register_lo();
  1852       Register op1_hi = left->as_register_hi();
  1853       Register op2_lo = right->as_register_lo();
  1854       Register op2_hi = right->as_register_hi();
  1855       Register dst_lo = dest->as_register_lo();
  1856       Register dst_hi = dest->as_register_hi();
  1858       switch (code) {
  1859         case lir_add:
  1860           __ addcc(op1_lo, op2_lo, dst_lo);
  1861           __ addc (op1_hi, op2_hi, dst_hi);
  1862           break;
  1864         case lir_sub:
  1865           __ subcc(op1_lo, op2_lo, dst_lo);
  1866           __ subc (op1_hi, op2_hi, dst_hi);
  1867           break;
  1869         default: ShouldNotReachHere();
  1870       }
  1871 #endif
  1872     } else {
  1873       assert (right->is_single_cpu(), "Just Checking");
  1875       Register lreg = left->as_register();
  1876       Register res  = dest->as_register();
  1877       Register rreg = right->as_register();
  1878       switch (code) {
  1879         case lir_add:  __ add  (lreg, rreg, res); break;
  1880         case lir_sub:  __ sub  (lreg, rreg, res); break;
  1881         case lir_mul:  __ mult (lreg, rreg, res); break;
  1882         default: ShouldNotReachHere();
  1883       }
  1884     }
  1885   } else {
  1886     assert (right->is_constant(), "must be constant");
  1888     if (dest->is_single_cpu()) {
  1889       Register lreg = left->as_register();
  1890       Register res  = dest->as_register();
  1891       int    simm13 = right->as_constant_ptr()->as_jint();
  1893       switch (code) {
  1894         case lir_add:  __ add  (lreg, simm13, res); break;
  1895         case lir_sub:  __ sub  (lreg, simm13, res); break;
  1896         case lir_mul:  __ mult (lreg, simm13, res); break;
  1897         default: ShouldNotReachHere();
  1898       }
  1899     } else {
  1900       Register lreg = left->as_pointer_register();
  1901       Register res  = dest->as_register_lo();
  1902       long con = right->as_constant_ptr()->as_jlong();
  1903       assert(Assembler::is_simm13(con), "must be simm13");
  1905       switch (code) {
  1906         case lir_add:  __ add  (lreg, (int)con, res); break;
  1907         case lir_sub:  __ sub  (lreg, (int)con, res); break;
  1908         case lir_mul:  __ mult (lreg, (int)con, res); break;
  1909         default: ShouldNotReachHere();
  1910       }
  1911     }
  1912   }
  1913 }
  1916 void LIR_Assembler::fpop() {
  1917   // do nothing
  1918 }
  1921 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
  1922   switch (code) {
  1923     case lir_sin:
  1924     case lir_tan:
  1925     case lir_cos: {
  1926       assert(thread->is_valid(), "preserve the thread object for performance reasons");
  1927       assert(dest->as_double_reg() == F0, "the result will be in f0/f1");
  1928       break;
  1929     }
  1930     case lir_sqrt: {
  1931       assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
  1932       FloatRegister src_reg = value->as_double_reg();
  1933       FloatRegister dst_reg = dest->as_double_reg();
  1934       __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg);
  1935       break;
  1936     }
  1937     case lir_abs: {
  1938       assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
  1939       FloatRegister src_reg = value->as_double_reg();
  1940       FloatRegister dst_reg = dest->as_double_reg();
  1941       __ fabs(FloatRegisterImpl::D, src_reg, dst_reg);
  1942       break;
  1943     }
  1944     default: {
  1945       ShouldNotReachHere();
  1946       break;
  1947     }
  1948   }
  1949 }
  1952 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
  1953   if (right->is_constant()) {
  1954     if (dest->is_single_cpu()) {
  1955       int simm13 = right->as_constant_ptr()->as_jint();
  1956       switch (code) {
  1957         case lir_logic_and:   __ and3 (left->as_register(), simm13, dest->as_register()); break;
  1958         case lir_logic_or:    __ or3  (left->as_register(), simm13, dest->as_register()); break;
  1959         case lir_logic_xor:   __ xor3 (left->as_register(), simm13, dest->as_register()); break;
  1960         default: ShouldNotReachHere();
  1961       }
  1962     } else {
  1963       long c = right->as_constant_ptr()->as_jlong();
  1964       assert(c == (int)c && Assembler::is_simm13(c), "out of range");
  1965       int simm13 = (int)c;
  1966       switch (code) {
  1967         case lir_logic_and:
  1968 #ifndef _LP64
  1969           __ and3 (left->as_register_hi(), 0,      dest->as_register_hi());
  1970 #endif
  1971           __ and3 (left->as_register_lo(), simm13, dest->as_register_lo());
  1972           break;
  1974         case lir_logic_or:
  1975 #ifndef _LP64
  1976           __ or3 (left->as_register_hi(), 0,      dest->as_register_hi());
  1977 #endif
  1978           __ or3 (left->as_register_lo(), simm13, dest->as_register_lo());
  1979           break;
  1981         case lir_logic_xor:
  1982 #ifndef _LP64
  1983           __ xor3 (left->as_register_hi(), 0,      dest->as_register_hi());
  1984 #endif
  1985           __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo());
  1986           break;
  1988         default: ShouldNotReachHere();
  1989       }
  1990     }
  1991   } else {
  1992     assert(right->is_register(), "right should be in register");
  1994     if (dest->is_single_cpu()) {
  1995       switch (code) {
  1996         case lir_logic_and:   __ and3 (left->as_register(), right->as_register(), dest->as_register()); break;
  1997         case lir_logic_or:    __ or3  (left->as_register(), right->as_register(), dest->as_register()); break;
  1998         case lir_logic_xor:   __ xor3 (left->as_register(), right->as_register(), dest->as_register()); break;
  1999         default: ShouldNotReachHere();
  2000       }
  2001     } else {
  2002 #ifdef _LP64
  2003       Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
  2004                                                                         left->as_register_lo();
  2005       Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
  2006                                                                           right->as_register_lo();
  2008       switch (code) {
  2009         case lir_logic_and: __ and3 (l, r, dest->as_register_lo()); break;
  2010         case lir_logic_or:  __ or3  (l, r, dest->as_register_lo()); break;
  2011         case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break;
  2012         default: ShouldNotReachHere();
  2013       }
  2014 #else
  2015       switch (code) {
  2016         case lir_logic_and:
  2017           __ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
  2018           __ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
  2019           break;
  2021         case lir_logic_or:
  2022           __ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
  2023           __ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
  2024           break;
  2026         case lir_logic_xor:
  2027           __ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
  2028           __ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
  2029           break;
  2031         default: ShouldNotReachHere();
  2032       }
  2033 #endif
  2034     }
  2035   }
  2036 }
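       // note: shift_amount returns log2 of the element size, so scaling an
       // array index to a byte offset is a single shift (see emit_arraycopy).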
  2039 int LIR_Assembler::shift_amount(BasicType t) {
  2040   int elem_size = type2aelembytes(t);
  2041   switch (elem_size) {
  2042     case 1 : return 0;
  2043     case 2 : return 1;
  2044     case 4 : return 2;
  2045     case 8 : return 3;
  2046   }
  2047   ShouldNotReachHere();
  2048   return -1;
  2049 }
  2052 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind) {
  2053   assert(exceptionOop->as_register() == Oexception, "should match");
  2054   assert(unwind || exceptionPC->as_register() == Oissuing_pc, "should match");
  2056   info->add_register_oop(exceptionOop);
  2058   if (unwind) {
  2059     __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
  2060     __ delayed()->nop();
  2061   } else {
  2062     // reuse the debug info from the safepoint poll for the throw op itself
  2063     address pc_for_athrow  = __ pc();
  2064     int pc_for_athrow_offset = __ offset();
  2065     RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
  2066     __ set((intptr_t)pc_for_athrow, Oissuing_pc, rspec);
  2067     add_call_info(pc_for_athrow_offset, info); // for exception handler
  2069     __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
  2070     __ delayed()->nop();
  2071   }
  2072 }
  2075 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  2076   Register src = op->src()->as_register();
  2077   Register dst = op->dst()->as_register();
  2078   Register src_pos = op->src_pos()->as_register();
  2079   Register dst_pos = op->dst_pos()->as_register();
  2080   Register length  = op->length()->as_register();
  2081   Register tmp = op->tmp()->as_register();
  2082   Register tmp2 = O7;
  2084   int flags = op->flags();
  2085   ciArrayKlass* default_type = op->expected_type();
  2086   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  2087   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
  2089   // set up the arraycopy stub information
  2090   ArrayCopyStub* stub = op->stub();
  2092   // Always use the stub if no type information is available; it's OK if
  2093   // the known type isn't loaded, since the code sanity-checks it in debug
  2094   // mode, and the type isn't required when we know the exact type.
  2095   // Also check that the type is an array type.
  2096   // We also, for now, always call the stub if the barrier set requires a
  2097   // write_ref_pre barrier (which the stub does, but none of the optimized
  2098   // cases currently do).
  2099   if (op->expected_type() == NULL ||
  2100       Universe::heap()->barrier_set()->has_write_ref_pre_barrier()) {
  2101     __ mov(src,     O0);
  2102     __ mov(src_pos, O1);
  2103     __ mov(dst,     O2);
  2104     __ mov(dst_pos, O3);
  2105     __ mov(length,  O4);
  2106     __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy));
  2108     __ br_zero(Assembler::less, false, Assembler::pn, O0, *stub->entry());
  2109     __ delayed()->nop();
  2110     __ bind(*stub->continuation());
  2111     return;
  2112   }
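         // note: from here on the exact element type is known, so the checks
         // the generic stub would perform are emitted inline: null checks,
         // sign checks on the positions and length, range checks, and (for
         // oop arrays) a type check.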
  2114   assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
  2116   // make sure src and dst are non-null (their lengths are loaded below as needed)
  2117   if (flags & LIR_OpArrayCopy::src_null_check) {
  2118     __ tst(src);
  2119     __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
  2120     __ delayed()->nop();
  2121   }
  2123   if (flags & LIR_OpArrayCopy::dst_null_check) {
  2124     __ tst(dst);
  2125     __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
  2126     __ delayed()->nop();
  2127   }
  2129   if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
  2130     // test src_pos register
  2131     __ tst(src_pos);
  2132     __ br(Assembler::less, false, Assembler::pn, *stub->entry());
  2133     __ delayed()->nop();
  2134   }
  2136   if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
  2137     // test dst_pos register
  2138     __ tst(dst_pos);
  2139     __ br(Assembler::less, false, Assembler::pn, *stub->entry());
  2140     __ delayed()->nop();
  2141   }
  2143   if (flags & LIR_OpArrayCopy::length_positive_check) {
  2144     // make sure length isn't negative
  2145     __ tst(length);
  2146     __ br(Assembler::less, false, Assembler::pn, *stub->entry());
  2147     __ delayed()->nop();
  2148   }
  2150   if (flags & LIR_OpArrayCopy::src_range_check) {
  2151     __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2);
  2152     __ add(length, src_pos, tmp);
  2153     __ cmp(tmp2, tmp);
  2154     __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
  2155     __ delayed()->nop();
  2156   }
  2158   if (flags & LIR_OpArrayCopy::dst_range_check) {
  2159     __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2);
  2160     __ add(length, dst_pos, tmp);
  2161     __ cmp(tmp2, tmp);
  2162     __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
  2163     __ delayed()->nop();
  2164   }
  2166   if (flags & LIR_OpArrayCopy::type_check) {
  2167     __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
  2168     __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
  2169     __ cmp(tmp, tmp2);
  2170     __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
  2171     __ delayed()->nop();
  2172   }
  2174 #ifdef ASSERT
  2175   if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
  2176     // Sanity check the known type with the incoming class.  For the
  2177     // primitive case the types must match exactly with src.klass and
  2178     // dst.klass each exactly matching the default type.  For the
  2179     // object array case, if no type check is needed then either the
  2180     // dst type is exactly the expected type and the src type is a
  2181     // subtype which we can't check or src is the same array as dst
  2182     // but not necessarily exactly of type default_type.
  2183     Label known_ok, halt;
  2184     jobject2reg(op->expected_type()->encoding(), tmp);
  2185     __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
  2186     if (basic_type != T_OBJECT) {
  2187       __ cmp(tmp, tmp2);
  2188       __ br(Assembler::notEqual, false, Assembler::pn, halt);
  2189       __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
  2190       __ cmp(tmp, tmp2);
  2191       __ br(Assembler::equal, false, Assembler::pn, known_ok);
  2192       __ delayed()->nop();
  2193     } else {
  2194       __ cmp(tmp, tmp2);
  2195       __ br(Assembler::equal, false, Assembler::pn, known_ok);
  2196       __ delayed()->cmp(src, dst);
  2197       __ br(Assembler::equal, false, Assembler::pn, known_ok);
  2198       __ delayed()->nop();
  2199     }
  2200     __ bind(halt);
  2201     __ stop("incorrect type information in arraycopy");
  2202     __ bind(known_ok);
  2203   }
  2204 #endif
  2206   int shift = shift_amount(basic_type);
  2208   Register src_ptr = O0;
  2209   Register dst_ptr = O1;
  2210   Register len     = O2;
  2212   __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
  2213   if (shift == 0) {
  2214     __ add(src_ptr, src_pos, src_ptr);
  2215   } else {
  2216     __ sll(src_pos, shift, tmp);
  2217     __ add(src_ptr, tmp, src_ptr);
  2218   }
  2220   __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
  2221   if (shift == 0) {
  2222     __ add(dst_ptr, dst_pos, dst_ptr);
  2223   } else {
  2224     __ sll(dst_pos, shift, tmp);
  2225     __ add(dst_ptr, tmp, dst_ptr);
  2226   }
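         // note: src_ptr and dst_ptr now address the first elements to copy;
         // the length passed below is in bytes for the primitive copy but in
         // elements for the oop copy.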
  2228   if (basic_type != T_OBJECT) {
  2229     if (shift == 0) {
  2230       __ mov(length, len);
  2231     } else {
  2232       __ sll(length, shift, len);
  2233     }
  2234     __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy));
  2235   } else {
  2236     // oop_arraycopy takes a length in number of elements, so don't scale it.
  2237     __ mov(length, len);
  2238     __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy));
  2239   }
  2241   __ bind(*stub->continuation());
  2242 }
  2245 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  2246   if (dest->is_single_cpu()) {
  2247 #ifdef _LP64
  2248     if (left->type() == T_OBJECT) {
  2249       switch (code) {
  2250         case lir_shl:  __ sllx  (left->as_register(), count->as_register(), dest->as_register()); break;
  2251         case lir_shr:  __ srax  (left->as_register(), count->as_register(), dest->as_register()); break;
  2252         case lir_ushr: __ srl   (left->as_register(), count->as_register(), dest->as_register()); break;
  2253         default: ShouldNotReachHere();
  2254       }
  2255     } else
  2256 #endif
  2257       switch (code) {
  2258         case lir_shl:  __ sll   (left->as_register(), count->as_register(), dest->as_register()); break;
  2259         case lir_shr:  __ sra   (left->as_register(), count->as_register(), dest->as_register()); break;
  2260         case lir_ushr: __ srl   (left->as_register(), count->as_register(), dest->as_register()); break;
  2261         default: ShouldNotReachHere();
  2262       }
  2263   } else {
  2264 #ifdef _LP64
  2265     switch (code) {
  2266       case lir_shl:  __ sllx  (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
  2267       case lir_shr:  __ srax  (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
  2268       case lir_ushr: __ srlx  (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
  2269       default: ShouldNotReachHere();
  2270     }
  2271 #else
  2272     switch (code) {
  2273       case lir_shl:  __ lshl  (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
  2274       case lir_shr:  __ lshr  (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
  2275       case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
  2276       default: ShouldNotReachHere();
  2277     }
  2278 #endif
  2279   }
  2280 }
  2283 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  2284 #ifdef _LP64
  2285   if (left->type() == T_OBJECT) {
  2286     count = count & 63;  // shouldn't shift by more than sizeof(intptr_t)
  2287     Register l = left->as_register();
  2288     Register d = dest->as_register_lo();
  2289     switch (code) {
  2290       case lir_shl:  __ sllx  (l, count, d); break;
  2291       case lir_shr:  __ srax  (l, count, d); break;
  2292       case lir_ushr: __ srlx  (l, count, d); break;
  2293       default: ShouldNotReachHere();
  2294     }
  2295     return;
  2296   }
  2297 #endif
  2299   if (dest->is_single_cpu()) {
  2300     count = count & 0x1F; // Java spec
  2301     switch (code) {
  2302       case lir_shl:  __ sll   (left->as_register(), count, dest->as_register()); break;
  2303       case lir_shr:  __ sra   (left->as_register(), count, dest->as_register()); break;
  2304       case lir_ushr: __ srl   (left->as_register(), count, dest->as_register()); break;
  2305       default: ShouldNotReachHere();
  2306     }
  2307   } else if (dest->is_double_cpu()) {
  2308     count = count & 63; // Java spec
  2309     switch (code) {
  2310       case lir_shl:  __ sllx  (left->as_pointer_register(), count, dest->as_pointer_register()); break;
  2311       case lir_shr:  __ srax  (left->as_pointer_register(), count, dest->as_pointer_register()); break;
  2312       case lir_ushr: __ srlx  (left->as_pointer_register(), count, dest->as_pointer_register()); break;
  2313       default: ShouldNotReachHere();
  2314     }
  2315   } else {
  2316     ShouldNotReachHere();
  2317   }
  2318 }
  2321 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  2322   assert(op->tmp1()->as_register()  == G1 &&
  2323          op->tmp2()->as_register()  == G3 &&
  2324          op->tmp3()->as_register()  == G4 &&
  2325          op->obj()->as_register()   == O0 &&
  2326          op->klass()->as_register() == G5, "must be");
  2327   if (op->init_check()) {
  2328     __ ld(op->klass()->as_register(),
  2329           instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc),
  2330           op->tmp1()->as_register());
  2331     add_debug_info_for_null_check_here(op->stub()->info());
  2332     __ cmp(op->tmp1()->as_register(), instanceKlass::fully_initialized);
  2333     __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
  2334     __ delayed()->nop();
  2335   }
  2336   __ allocate_object(op->obj()->as_register(),
  2337                      op->tmp1()->as_register(),
  2338                      op->tmp2()->as_register(),
  2339                      op->tmp3()->as_register(),
  2340                      op->header_size(),
  2341                      op->object_size(),
  2342                      op->klass()->as_register(),
  2343                      *op->stub()->entry());
  2344   __ bind(*op->stub()->continuation());
  2345   __ verify_oop(op->obj()->as_register());
  2346 }
  2349 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  2350   assert(op->tmp1()->as_register()  == G1 &&
  2351          op->tmp2()->as_register()  == G3 &&
  2352          op->tmp3()->as_register()  == G4 &&
  2353          op->tmp4()->as_register()  == O1 &&
  2354          op->klass()->as_register() == G5, "must be");
  2355   if (UseSlowPath ||
  2356       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
  2357       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
  2358     __ br(Assembler::always, false, Assembler::pn, *op->stub()->entry());
  2359     __ delayed()->nop();
  2360   } else {
  2361     __ allocate_array(op->obj()->as_register(),
  2362                       op->len()->as_register(),
  2363                       op->tmp1()->as_register(),
  2364                       op->tmp2()->as_register(),
  2365                       op->tmp3()->as_register(),
  2366                       arrayOopDesc::header_size(op->type()),
  2367                       type2aelembytes(op->type()),
  2368                       op->klass()->as_register(),
  2369                       *op->stub()->entry());
  2370   }
  2371   __ bind(*op->stub()->continuation());
  2372 }
  2375 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  2376   LIR_Code code = op->code();
  2377   if (code == lir_store_check) {
  2378     Register value = op->object()->as_register();
  2379     Register array = op->array()->as_register();
  2380     Register k_RInfo = op->tmp1()->as_register();
  2381     Register klass_RInfo = op->tmp2()->as_register();
  2382     Register Rtmp1 = op->tmp3()->as_register();
  2384     __ verify_oop(value);
  2386     CodeStub* stub = op->stub();
  2387     Label done;
  2388     __ cmp(value, 0);
  2389     __ br(Assembler::equal, false, Assembler::pn, done);
  2390     __ delayed()->nop();
  2391     load(array, oopDesc::klass_offset_in_bytes(), k_RInfo, T_OBJECT, op->info_for_exception());
  2392     load(value, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
  2394     // get instance klass
  2395     load(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc), k_RInfo, T_OBJECT, NULL);
  2396     // perform the fast part of the checking logic
  2397     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, &done, stub->entry(), NULL);
  2399     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
  2400     assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
  2401     __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
  2402     __ delayed()->nop();
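           // note: the slow-path stub takes the two klasses in G3/G1 and
           // leaves its answer in G3: nonzero for a successful subtype check,
           // zero on failure (hence the cmp/br below).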
  2403     __ cmp(G3, 0);
  2404     __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
  2405     __ delayed()->nop();
  2406     __ bind(done);
  2407   } else if (op->code() == lir_checkcast) {
  2408     // we always need a stub for the failure case.
  2409     CodeStub* stub = op->stub();
  2410     Register obj = op->object()->as_register();
  2411     Register k_RInfo = op->tmp1()->as_register();
  2412     Register klass_RInfo = op->tmp2()->as_register();
  2413     Register dst = op->result_opr()->as_register();
  2414     Register Rtmp1 = op->tmp3()->as_register();
  2415     ciKlass* k = op->klass();
  2417     if (obj == k_RInfo) {
  2418       k_RInfo = klass_RInfo;
  2419       klass_RInfo = obj;
  2420     }
  2421     if (op->profiled_method() != NULL) {
  2422       ciMethod* method = op->profiled_method();
  2423       int bci          = op->profiled_bci();
  2425       // We need two temporaries to perform this operation on SPARC,
  2426       // so to keep things simple we perform a redundant test here
  2427       Label profile_done;
  2428       __ cmp(obj, 0);
  2429       __ br(Assembler::notEqual, false, Assembler::pn, profile_done);
  2430       __ delayed()->nop();
  2431       // Object is null; update methodDataOop
  2432       ciMethodData* md = method->method_data();
  2433       if (md == NULL) {
  2434         bailout("out of memory building methodDataOop");
  2435         return;
  2436       }
  2437       ciProfileData* data = md->bci_to_data(bci);
  2438       assert(data != NULL,       "need data for checkcast");
  2439       assert(data->is_BitData(), "need BitData for checkcast");
  2440       Register mdo      = k_RInfo;
  2441       Register data_val = Rtmp1;
  2442       jobject2reg(md->encoding(), mdo);
  2444       int mdo_offset_bias = 0;
  2445       if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
  2446         // The offset is large so bias the mdo by the base of the slot so
  2447         // that the ld can use simm13s to reference the slots of the data
  2448         mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
  2449         __ set(mdo_offset_bias, data_val);
  2450         __ add(mdo, data_val, mdo);
  2451       }
  2454       Address flags_addr(mdo, 0, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
  2455       __ ldub(flags_addr, data_val);
  2456       __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
  2457       __ stb(data_val, flags_addr);
  2458       __ bind(profile_done);
  2459     }
  2461     Label done;
  2462     // patching may screw with our temporaries on sparc,
  2463     // so let's do it before loading the class
  2464     if (k->is_loaded()) {
  2465       jobject2reg(k->encoding(), k_RInfo);
  2466     } else {
  2467       jobject2reg_with_patching(k_RInfo, op->info_for_patch());
  2468     }
  2469     assert(obj != k_RInfo, "must be different");
  2470     __ cmp(obj, 0);
  2471     __ br(Assembler::equal, false, Assembler::pn, done);
  2472     __ delayed()->nop();
  2474     // get object class
  2475     // not a safepoint as obj null check happens earlier
  2476     load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
  2477     if (op->fast_check()) {
  2478       assert_different_registers(klass_RInfo, k_RInfo);
  2479       __ cmp(k_RInfo, klass_RInfo);
  2480       __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
  2481       __ delayed()->nop();
  2482       __ bind(done);
  2483     } else {
  2484       bool need_slow_path = true;
  2485       if (k->is_loaded()) {
  2486         if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
  2487           need_slow_path = false;
  2488         // perform the fast part of the checking logic
  2489         __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
  2490                                          (need_slow_path ? &done : NULL),
  2491                                          stub->entry(), NULL,
  2492                                          RegisterOrConstant(k->super_check_offset()));
  2493       } else {
  2494         // perform the fast part of the checking logic
  2495         __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7,
  2496                                          &done, stub->entry(), NULL);
  2497       }
  2498       if (need_slow_path) {
  2499         // call out-of-line instance of __ check_klass_subtype_slow_path(...):
  2500         assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
  2501         __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
  2502         __ delayed()->nop();
  2503         __ cmp(G3, 0);
  2504         __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
  2505         __ delayed()->nop();
  2506       }
  2507       __ bind(done);
  2508     }
  2509     __ mov(obj, dst);
  2510   } else if (code == lir_instanceof) {
  2511     Register obj = op->object()->as_register();
  2512     Register k_RInfo = op->tmp1()->as_register();
  2513     Register klass_RInfo = op->tmp2()->as_register();
  2514     Register dst = op->result_opr()->as_register();
  2515     Register Rtmp1 = op->tmp3()->as_register();
  2516     ciKlass* k = op->klass();
  2518     Label done;
  2519     if (obj == k_RInfo) {
  2520       k_RInfo = klass_RInfo;
  2521       klass_RInfo = obj;
  2522     }
  2523     // patching may screw with our temporaries on sparc,
  2524     // so let's do it before loading the class
  2525     if (k->is_loaded()) {
  2526       jobject2reg(k->encoding(), k_RInfo);
  2527     } else {
  2528       jobject2reg_with_patching(k_RInfo, op->info_for_patch());
  2529     }
  2530     assert(obj != k_RInfo, "must be different");
  2531     __ cmp(obj, 0);
  2532     __ br(Assembler::equal, true, Assembler::pn, done);
  2533     __ delayed()->set(0, dst);
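           // note: the branch above is annulled ("true"), so the set(0, dst)
           // in its delay slot executes only when the branch is taken; a null
           // obj therefore produces the false result directly.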
  2535     // get object class
  2536     // not a safepoint as obj null check happens earlier
  2537     load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
  2538     if (op->fast_check()) {
  2539       __ cmp(k_RInfo, klass_RInfo);
  2540       __ br(Assembler::equal, true, Assembler::pt, done);
  2541       __ delayed()->set(1, dst);
  2542       __ set(0, dst);
  2543       __ bind(done);
  2544     } else {
  2545       bool need_slow_path = true;
  2546       if (k->is_loaded()) {
  2547         if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
  2548           need_slow_path = false;
  2549         // perform the fast part of the checking logic
  2550         __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, O7, noreg,
  2551                                          (need_slow_path ? &done : NULL),
  2552                                          (need_slow_path ? &done : NULL), NULL,
  2553                                          RegisterOrConstant(k->super_check_offset()),
  2554                                          dst);
  2555       } else {
  2556         assert(dst != klass_RInfo && dst != k_RInfo, "need 3 registers");
  2557         // perform the fast part of the checking logic
  2558         __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, O7, dst,
  2559                                          &done, &done, NULL,
  2560                                          RegisterOrConstant(-1),
  2561                                          dst);
  2562       }
  2563       if (need_slow_path) {
  2564         // call out-of-line instance of __ check_klass_subtype_slow_path(...):
  2565         assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
  2566         __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
  2567         __ delayed()->nop();
  2568         __ mov(G3, dst);
  2569       }
  2570       __ bind(done);
  2571     }
  2572   } else {
  2573     ShouldNotReachHere();
  2574   }
  2575 }
  2579 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  2580   if (op->code() == lir_cas_long) {
  2581     assert(VM_Version::supports_cx8(), "wrong machine");
  2582     Register addr = op->addr()->as_pointer_register();
  2583     Register cmp_value_lo = op->cmp_value()->as_register_lo();
  2584     Register cmp_value_hi = op->cmp_value()->as_register_hi();
  2585     Register new_value_lo = op->new_value()->as_register_lo();
  2586     Register new_value_hi = op->new_value()->as_register_hi();
  2587     Register t1 = op->tmp1()->as_register();
  2588     Register t2 = op->tmp2()->as_register();
  2589 #ifdef _LP64
  2590     __ mov(cmp_value_lo, t1);
  2591     __ mov(new_value_lo, t2);
  2592 #else
  2593     // move high and low halves of long values into single registers
  2594     __ sllx(cmp_value_hi, 32, t1);         // shift high half into temp reg
  2595     __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half
  2596     __ or3(t1, cmp_value_lo, t1);          // t1 holds 64-bit compare value
  2597     __ sllx(new_value_hi, 32, t2);
  2598     __ srl(new_value_lo, 0, new_value_lo);
  2599     __ or3(t2, new_value_lo, t2);          // t2 holds 64-bit value to swap
  2600 #endif
  2601     // perform the compare and swap operation
  2602     __ casx(addr, t1, t2);
  2603     // generate condition code - if the swap succeeded, t2 ("new value" reg) was
  2604     // overwritten with the original value in "addr" and will be equal to t1.
  2605     __ cmp(t1, t2);
  2607   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
  2608     Register addr = op->addr()->as_pointer_register();
  2609     Register cmp_value = op->cmp_value()->as_register();
  2610     Register new_value = op->new_value()->as_register();
  2611     Register t1 = op->tmp1()->as_register();
  2612     Register t2 = op->tmp2()->as_register();
  2613     __ mov(cmp_value, t1);
  2614     __ mov(new_value, t2);
  2615 #ifdef _LP64
  2616     if (op->code() == lir_cas_obj) {
  2617       __ casx(addr, t1, t2);
  2618     } else
  2619 #endif
  2621         __ cas(addr, t1, t2);
  2623     __ cmp(t1, t2);
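         // note: as in the long case, t2 holds the old memory value after the
         // cas, so "equal" means the swap succeeded.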
  2624   } else {
  2625     Unimplemented();
  2626   }
  2627 }
  2629 void LIR_Assembler::set_24bit_FPU() {
  2630   Unimplemented();
  2631 }
  2634 void LIR_Assembler::reset_FPU() {
  2635   Unimplemented();
  2636 }
  2639 void LIR_Assembler::breakpoint() {
  2640   __ breakpoint_trap();
  2641 }
  2644 void LIR_Assembler::push(LIR_Opr opr) {
  2645   Unimplemented();
  2646 }
  2649 void LIR_Assembler::pop(LIR_Opr opr) {
  2650   Unimplemented();
  2651 }
  2654 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
  2655   Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  2656   Register dst = dst_opr->as_register();
  2657   Register reg = mon_addr.base();
  2658   int offset = mon_addr.disp();
  2659   // compute pointer to BasicLock
  2660   if (mon_addr.is_simm13()) {
  2661     __ add(reg, offset, dst);
  2662   } else {
  2663     __ set(offset, dst);
  2664     __ add(dst, reg, dst);
  2665   }
  2666 }
  2669 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  2670   Register obj = op->obj_opr()->as_register();
  2671   Register hdr = op->hdr_opr()->as_register();
  2672   Register lock = op->lock_opr()->as_register();
  2674   // obj may not be an oop
  2675   if (op->code() == lir_lock) {
  2676     MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
  2677     if (UseFastLocking) {
  2678       assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
  2679       // add debug info for NullPointerException only if one is possible
  2680       if (op->info() != NULL) {
  2681         add_debug_info_for_null_check_here(op->info());
  2682       }
  2683       __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
  2684     } else {
  2685       // always do slow locking
  2686       // note: the slow locking code could be inlined here, however if we use
  2687       //       slow locking, speed doesn't matter anyway and this solution is
  2688       //       simpler and requires less duplicated code - additionally, the
  2689       //       slow locking code is the same in either case which simplifies
  2690       //       debugging
  2691       __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
  2692       __ delayed()->nop();
  2693     }
  2694   } else {
  2695     assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
  2696     if (UseFastLocking) {
  2697       assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
  2698       __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  2699     } else {
  2700       // always do slow unlocking
  2701       // note: the slow unlocking code could be inlined here, however if we use
  2702       //       slow unlocking, speed doesn't matter anyway and this solution is
  2703       //       simpler and requires less duplicated code - additionally, the
  2704       //       slow unlocking code is the same in either case which simplifies
  2705       //       debugging
  2706       __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
  2707       __ delayed()->nop();
  2708     }
  2709   }
  2710   __ bind(*op->stub()->continuation());
  2711 }
  2714 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  2715   ciMethod* method = op->profiled_method();
  2716   int bci          = op->profiled_bci();
  2718   // Update counter for all call types
  2719   ciMethodData* md = method->method_data();
  2720   if (md == NULL) {
  2721     bailout("out of memory building methodDataOop");
  2722     return;
  2723   }
  2724   ciProfileData* data = md->bci_to_data(bci);
  2725   assert(data->is_CounterData(), "need CounterData for calls");
  2726   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
  2727   assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
  2728   Register mdo  = op->mdo()->as_register();
  2729   Register tmp1 = op->tmp1()->as_register();
  2730   jobject2reg(md->encoding(), mdo);
  2731   int mdo_offset_bias = 0;
  2732   if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
  2733                             data->size_in_bytes())) {
  2734     // The offset is large so bias the mdo by the base of the slot so
  2735     // that the ld can use simm13s to reference the slots of the data
  2736     mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
  2737     __ set(mdo_offset_bias, O7);
  2738     __ add(mdo, O7, mdo);
  2739   }
  Address counter_addr(mdo, 0, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  __ lduw(counter_addr, tmp1);
  __ add(tmp1, DataLayout::counter_increment, tmp1);
  __ stw(tmp1, counter_addr);
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes.
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      Tier1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, tmp1, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (Tier1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the methodDataOop rather than needing to do
      // dynamic tests on the receiver type.

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations.
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, 0, md->byte_offset_of_slot(data,
                                                            VirtualCallData::receiver_count_offset(i)) -
                            mdo_offset_bias);
          __ lduw(data_addr, tmp1);
          __ add(tmp1, DataLayout::counter_increment, tmp1);
          __ stw(tmp1, data_addr);
          return;
        }
      }
      // Receiver type not found in profile data; select an empty slot.

      // Note that this is less efficient than it should be because it
      // always writes the receiver part of the VirtualCallData rather
      // than doing so only the first time.
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
                            mdo_offset_bias);
          jobject2reg(known_klass->encoding(), tmp1);
          __ st_ptr(tmp1, recv_addr);
          Address data_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
                            mdo_offset_bias);
          __ lduw(data_addr, tmp1);
          __ add(tmp1, DataLayout::counter_increment, tmp1);
          __ stw(tmp1, data_addr);
          return;
        }
      }
    } else {
      load(Address(recv, 0, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
      Label update_done;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        Label next_test;
        // See if the receiver is receiver[n].
        Address receiver_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
                              mdo_offset_bias);
        __ ld_ptr(receiver_addr, tmp1);
        __ verify_oop(tmp1);
        __ cmp(recv, tmp1);
        __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
        __ delayed()->nop();
        Address data_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
                          mdo_offset_bias);
        __ lduw(data_addr, tmp1);
        __ add(tmp1, DataLayout::counter_increment, tmp1);
        __ stw(tmp1, data_addr);
        __ br(Assembler::always, false, Assembler::pt, update_done);
        __ delayed()->nop();
        __ bind(next_test);
      }

      // Didn't find receiver; find next empty slot and fill it in.
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        Label next_test;
        Address recv_addr(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
                          mdo_offset_bias);
        load(recv_addr, tmp1, T_OBJECT);
        __ tst(tmp1);
        __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
        __ delayed()->nop();
        __ st_ptr(recv, recv_addr);
        __ set(DataLayout::counter_increment, tmp1);
        __ st_ptr(tmp1, Address(mdo, 0, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
                                mdo_offset_bias));
        if (i < (VirtualCallData::row_limit() - 1)) {
          __ br(Assembler::always, false, Assembler::pt, update_done);
          __ delayed()->nop();
        }
        __ bind(next_test);
      }

      __ bind(update_done);
    }
  }
}

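// Informal sketch of the receiver-type update emitted above (pseudocode,
// not the generated SPARC code).  The row_limit() receiver rows are scanned
// twice: once for a matching klass, then once for an empty slot.
//
//   k = recv->klass();
//   for (i = 0; i < row_limit; i++)
//     if (receiver[i] == k)    { count[i] += counter_increment; goto done; }
//   for (i = 0; i < row_limit; i++)
//     if (receiver[i] == NULL) { receiver[i] = k; count[i] = counter_increment; goto done; }
//   done: ;  // if every row is taken, the update simply falls through
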
void LIR_Assembler::align_backward_branch_target() {
  __ align(16);
}

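// Informal background: on SPARC every branch and call is followed by a
// delay slot whose instruction is issued before the transfer takes effect.
// emit_delay materializes the LIR_OpDelay pseudo-op that the peephole pass
// (see peephole() below) created for exactly that slot.
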
void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
  // Make sure we are expecting a delay.  This has the side effect of
  // clearing the delay state, so we can use _masm instead of
  // _masm->delayed() to do the code generation.
  __ delayed();

  // make sure we only emit one instruction
  int offset = code_offset();
  op->delay_op()->emit_code(this);
#ifdef ASSERT
  if (code_offset() - offset != NativeInstruction::nop_instruction_size) {
    op->delay_op()->print();
  }
  assert(code_offset() - offset == NativeInstruction::nop_instruction_size,
         "only one instruction can go in a delay slot");
#endif

  // we may also be emitting the call info for the instruction
  // which we are the delay slot of.
  CodeEmitInfo* call_info = op->call_info();
  if (call_info) {
    add_call_info(code_offset(), call_info);
  }

  if (VerifyStackAtCalls) {
    _masm->sub(FP, SP, O7);
    _masm->cmp(O7, initial_frame_size_in_bytes());
    _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
  }
}

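// Informal note: the VerifyStackAtCalls check recomputes the current frame
// size as FP - SP at the call site and traps if it differs from
// initial_frame_size_in_bytes(), catching stack pointer corruption at the
// earliest point it can be observed.
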
void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  assert(left->is_register(), "can only handle registers");

  if (left->is_single_cpu()) {
    __ neg(left->as_register(), dest->as_register());
  } else if (left->is_single_fpu()) {
    __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg());
  } else if (left->is_double_fpu()) {
    __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg());
  } else {
    assert(left->is_double_cpu(), "must be a long");
    Register Rlow = left->as_register_lo();
    Register Rhi  = left->as_register_hi();
#ifdef _LP64
    __ sub(G0, Rlow, dest->as_register_lo());
#else
    __ subcc(G0, Rlow, dest->as_register_lo());
    __ subc (G0, Rhi,  dest->as_register_hi());
#endif
  }
}

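// A worked example of the two-instruction 32-bit negate (value
// hypothetical): negating the long 0x00000001_00000000 gives
// low = 0 - 0x00000000 = 0 with no borrow, then
// high = 0 - 0x00000001 - borrow(0) = 0xFFFFFFFF, i.e. the 64-bit result
// 0xFFFFFFFF_00000000 = -2^32.  subcc records the borrow in the carry flag
// that subc then consumes, so the two instructions must stay paired.
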
void LIR_Assembler::fxch(int i) {
  Unimplemented();
}

void LIR_Assembler::fld(int i) {
  Unimplemented();
}

void LIR_Assembler::ffree(int i) {
  Unimplemented();
}

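// Informal note: fxch/fld/ffree model the x87 register stack on x86.  SPARC
// has a flat floating-point register file, so these shared LIR_Assembler
// hooks are never reached here and remain unimplemented.
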
void LIR_Assembler::rt_call(LIR_Opr result, address dest,
                            const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {

  // if tmp is invalid, then the function being called doesn't destroy the thread
  if (tmp->is_valid()) {
    __ save_thread(tmp->as_register());
  }
  __ call(dest, relocInfo::runtime_call_type);
  __ delayed()->nop();
  if (info != NULL) {
    add_call_info_here(info);
  }
  if (tmp->is_valid()) {
    __ restore_thread(tmp->as_register());
  }

#ifdef ASSERT
  __ verify_thread();
#endif // ASSERT
}

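// Informal note: G2 conventionally holds the current JavaThread* in
// compiled code (see get_thread() below).  save_thread/restore_thread
// preserve it across runtime calls that may clobber it; callees known to
// leave the thread register intact pass an invalid tmp and skip the save.
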
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
#ifdef _LP64
  ShouldNotReachHere();
#endif

  NEEDS_CLEANUP;
  if (type == T_LONG) {
    LIR_Address* mem_addr = dest->is_address() ? dest->as_address_ptr() : src->as_address_ptr();

    // (extended to allow indexed as well as constant displaced for JSR-166)
    Register idx = noreg; // contains either constant offset or index

    int disp = mem_addr->disp();
    if (mem_addr->index() == LIR_OprFact::illegalOpr) {
      if (!Assembler::is_simm13(disp)) {
        idx = O7;
        __ set(disp, idx);
      }
    } else {
      assert(disp == 0, "not both indexed and disp");
      idx = mem_addr->index()->as_register();
    }

    int null_check_offset = -1;

    Register base = mem_addr->base()->as_register();
    if (src->is_register() && dest->is_address()) {
      // G4 is high half, G5 is low half
      if (VM_Version::v9_instructions_work()) {
        // clear the top bits of G5, and scale up G4
        __ srl (src->as_register_lo(),  0, G5);
        __ sllx(src->as_register_hi(), 32, G4);
        // combine the two halves into the 64 bits of G4
        __ or3(G4, G5, G4);
        null_check_offset = __ offset();
        if (idx == noreg) {
          __ stx(G4, base, disp);
        } else {
          __ stx(G4, base, idx);
        }
      } else {
        __ mov (src->as_register_hi(), G4);
        __ mov (src->as_register_lo(), G5);
        null_check_offset = __ offset();
        if (idx == noreg) {
          __ std(G4, base, disp);
        } else {
          __ std(G4, base, idx);
        }
      }
    } else if (src->is_address() && dest->is_register()) {
      null_check_offset = __ offset();
      if (VM_Version::v9_instructions_work()) {
        if (idx == noreg) {
          __ ldx(base, disp, G5);
        } else {
          __ ldx(base, idx, G5);
        }
        __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
        __ mov (G5, dest->as_register_lo());     // copy low half into lo
      } else {
        if (idx == noreg) {
          __ ldd(base, disp, G4);
        } else {
          __ ldd(base, idx, G4);
        }
        // G4 is high half, G5 is low half
        __ mov (G4, dest->as_register_hi());
        __ mov (G5, dest->as_register_lo());
      }
    } else {
      Unimplemented();
    }
    if (info != NULL) {
      add_debug_info_for_null_check(null_check_offset, info);
    }
  } else {
    // use normal move for all other volatiles since they don't need
    // special handling to remain atomic.
    move_op(src, dest, type, lir_patch_none, info, false, false);
  }
}

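// Informal rationale: a Java volatile long must be accessed in one memory
// operation on 32-bit SPARC.  The V9 path packs the two 32-bit halves into
// a single register and uses ldx/stx; the V8 fallback uses ldd/std on the
// even/odd pair G4/G5, which the architecture likewise performs as a single
// 64-bit access for aligned addresses.
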
void LIR_Assembler::membar() {
  // only StoreLoad membars are ever explicitly needed on SPARC in TSO mode
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::membar_acquire() {
  // no-op on TSO
}

void LIR_Assembler::membar_release() {
  // no-op on TSO
}

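// Informal note: SPARC TSO (total store order) already forbids the
// load-load, load-store and store-store reorderings that acquire/release
// barriers would rule out, so only the store-load case ever needs an
// explicit membar instruction.
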
// Pack two sequential registers containing 32-bit values
// into a single 64-bit register.
// rs and rs->successor() are packed into rd.
// rd and rs may be the same register.
// Note: rs and rs->successor() are destroyed.
void LIR_Assembler::pack64(Register rs, Register rd) {
  __ sllx(rs, 32, rs);
  __ srl(rs->successor(), 0, rs->successor());
  __ or3(rs, rs->successor(), rd);
}

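// A worked example (values hypothetical): with rs = 0x12345678 (high half)
// and rs->successor() = 0x9ABCDEF0 (low half), sllx leaves rs holding
// 0x12345678_00000000, srl by 0 zero-extends the successor to
// 0x00000000_9ABCDEF0, and or3 combines them into rd = 0x12345678_9ABCDEF0.
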
// Unpack a 64-bit value in a register into
// two sequential registers.
// rd is unpacked into rd and rd->successor().
void LIR_Assembler::unpack64(Register rd) {
  __ mov(rd, rd->successor());
  __ srax(rd, 32, rd);
  __ sra(rd->successor(), 0, rd->successor());
}

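// Continuing the example above (value hypothetical): with
// rd = 0x12345678_9ABCDEF0, mov copies the full value into the successor,
// srax yields rd = 0x00000000_12345678, and sra by 0 sign-extends the low
// word, leaving the successor as 0xFFFFFFFF_9ABCDEF0; only its low 32 bits
// are significant afterwards.
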
void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
  LIR_Address* addr = addr_opr->as_address_ptr();
  assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()),
         "can't handle complex addresses yet");
  __ add(addr->base()->as_register(), addr->disp(), dest->as_register());
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
  __ mov(G2_thread, result_reg->as_register());
}

void LIR_Assembler::peephole(LIR_List* lir) {
  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (op->code()) {
      case lir_cond_float_branch:
      case lir_branch: {
        LIR_OpBranch* branch = op->as_OpBranch();
        assert(branch->info() == NULL, "shouldn't be state on branches anymore");
        LIR_Op* delay_op = NULL;
        // We'd like to be able to pull following instructions into
        // this slot but we don't know enough to do it safely yet, so
        // we only optimize block-to-block control flow.
        if (LIRFillDelaySlots && branch->block()) {
          LIR_Op* prev = inst->at(i - 1);
          if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) {
            // swap previous instruction into delay slot
            inst->at_put(i - 1, op);
            inst->at_put(i, new LIR_OpDelay(prev, op->info()));
#ifndef PRODUCT
            if (LIRTracePeephole) {
              tty->print_cr("delayed");
              inst->at(i - 1)->print();
              inst->at(i)->print();
            }
#endif
            continue;
          }
        }

        if (!delay_op) {
          delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL);
        }
        inst->insert_before(i + 1, delay_op);
        break;
      }
      case lir_static_call:
      case lir_virtual_call:
      case lir_icvirtual_call:
      case lir_optvirtual_call: {
        LIR_Op* delay_op = NULL;
        LIR_Op* prev = inst->at(i - 1);
        if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
            (op->code() != lir_virtual_call ||
             !prev->result_opr()->is_single_cpu() ||
             prev->result_opr()->as_register() != O0) &&
            LIR_Assembler::is_single_instruction(prev)) {
          // Only moves without info can be put into the delay slot.
          // Also don't allow the setup of the receiver in the delay
          // slot for vtable calls.
          inst->at_put(i - 1, op);
          inst->at_put(i, new LIR_OpDelay(prev, op->info()));
#ifndef PRODUCT
          if (LIRTracePeephole) {
            tty->print_cr("delayed");
            inst->at(i - 1)->print();
            inst->at(i)->print();
          }
#endif
          continue;
        }

        if (!delay_op) {
          delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
          inst->insert_before(i + 1, delay_op);
        }
        break;
      }
    }
  }
}

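// Informal summary: every SPARC branch and call carries a delay slot that
// must contain exactly one instruction.  When the instruction preceding the
// branch/call is a safe single instruction, the peephole swaps it into the
// slot and saves a cycle; otherwise it inserts an explicit nop-bearing
// LIR_OpDelay so that emit_delay() always has one instruction to emit.
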
#undef __
