src/cpu/sparc/vm/stubGenerator_sparc.cpp

author:      aoqi
date:        Wed, 27 Apr 2016 01:25:04 +0800
changeset:   0:f90c822e73f8
child:       6876:710a3c8b516e
permissions: -rw-r--r--

Initial load from http://hg.openjdk.java.net/jdk8u/jdk8u/hotspot/
at changeset 6782:28b50d07f6f8 (tag: jdk8u25-b17)

     1 /*
     2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "asm/macroAssembler.inline.hpp"
    27 #include "interpreter/interpreter.hpp"
    28 #include "nativeInst_sparc.hpp"
    29 #include "oops/instanceOop.hpp"
    30 #include "oops/method.hpp"
    31 #include "oops/objArrayKlass.hpp"
    32 #include "oops/oop.inline.hpp"
    33 #include "prims/methodHandles.hpp"
    34 #include "runtime/frame.inline.hpp"
    35 #include "runtime/handles.inline.hpp"
    36 #include "runtime/sharedRuntime.hpp"
    37 #include "runtime/stubCodeGenerator.hpp"
    38 #include "runtime/stubRoutines.hpp"
    39 #include "runtime/thread.inline.hpp"
    40 #include "utilities/top.hpp"
    41 #ifdef COMPILER2
    42 #include "opto/runtime.hpp"
    43 #endif
    45 // Declaration and definition of StubGenerator (no .hpp file).
    46 // For a more detailed description of the stub routine structure
    47 // see the comment in stubRoutines.hpp.
    49 #define __ _masm->
    51 #ifdef PRODUCT
    52 #define BLOCK_COMMENT(str) /* nothing */
    53 #else
    54 #define BLOCK_COMMENT(str) __ block_comment(str)
    55 #endif
    57 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
    59 // Note:  The register L7 is used as L7_thread_cache, and may not be used
    60 //        any other way within this module.
    63 static const Register& Lstub_temp = L2;
    65 // -------------------------------------------------------------------------------------------------------------------------
    66 // Stub Code definitions
    68 static address handle_unsafe_access() {
    69   JavaThread* thread = JavaThread::current();
    70   address pc  = thread->saved_exception_pc();
    71   address npc = thread->saved_exception_npc();
    72   // pc is the instruction which we must emulate
    73   // doing a no-op is fine:  return garbage from the load
    75   // request an async exception
    76   thread->set_pending_unsafe_access_error();
    78   // return address of next instruction to execute
    79   return npc;
    80 }
    82 class StubGenerator: public StubCodeGenerator {
    83  private:
    85 #ifdef PRODUCT
    86 #define inc_counter_np(a,b,c)
    87 #else
    88 #define inc_counter_np(counter, t1, t2) \
    89   BLOCK_COMMENT("inc_counter " #counter); \
    90   __ inc_counter(&counter, t1, t2);
    91 #endif
    93   //----------------------------------------------------------------------------------------------------
    94   // Call stubs are used to call Java from C
    96   address generate_call_stub(address& return_pc) {
    97     StubCodeMark mark(this, "StubRoutines", "call_stub");
    98     address start = __ pc();
   100     // Incoming arguments:
   101     //
   102     // o0         : call wrapper address
   103     // o1         : result (address)
   104     // o2         : result type
   105     // o3         : method
   106     // o4         : (interpreter) entry point
   107     // o5         : parameters (address)
   108     // [sp + 0x5c]: parameter size (in words)
   109     // [sp + 0x60]: thread
   110     //
   111     // +---------------+ <--- sp + 0
   112     // |               |
   113     // . reg save area .
   114     // |               |
   115     // +---------------+ <--- sp + 0x40
   116     // |               |
   117     // . extra 7 slots .
   118     // |               |
   119     // +---------------+ <--- sp + 0x5c
   120     // |  param. size  |
   121     // +---------------+ <--- sp + 0x60
   122     // |    thread     |
   123     // +---------------+
   124     // |               |
   126     // note: if the link argument position changes, adjust
   127     //       the code in frame::entry_frame_call_wrapper()
   129     const Argument link           = Argument(0, false); // used only for GC
   130     const Argument result         = Argument(1, false);
   131     const Argument result_type    = Argument(2, false);
   132     const Argument method         = Argument(3, false);
   133     const Argument entry_point    = Argument(4, false);
   134     const Argument parameters     = Argument(5, false);
   135     const Argument parameter_size = Argument(6, false);
   136     const Argument thread         = Argument(7, false);
   138     // setup thread register
   139     __ ld_ptr(thread.as_address(), G2_thread);
   140     __ reinit_heapbase();
   142 #ifdef ASSERT
   143     // make sure we have no pending exceptions
   144     { const Register t = G3_scratch;
   145       Label L;
   146       __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), t);
   147       __ br_null_short(t, Assembler::pt, L);
   148       __ stop("StubRoutines::call_stub: entered with pending exception");
   149       __ bind(L);
   150     }
   151 #endif
   153     // create activation frame & allocate space for parameters
   154     { const Register t = G3_scratch;
   155       __ ld_ptr(parameter_size.as_address(), t);                // get parameter size (in words)
   156       __ add(t, frame::memory_parameter_word_sp_offset, t);     // add space for save area (in words)
   157       __ round_to(t, WordsPerLong);                             // make sure it is multiple of 2 (in words)
   158       __ sll(t, Interpreter::logStackElementSize, t);           // compute number of bytes
   159       __ neg(t);                                                // negate so it can be used with save
   160       __ save(SP, t, SP);                                       // setup new frame
   161     }
   163     // +---------------+ <--- sp + 0
   164     // |               |
   165     // . reg save area .
   166     // |               |
   167     // +---------------+ <--- sp + 0x40
   168     // |               |
   169     // . extra 7 slots .
   170     // |               |
   171     // +---------------+ <--- sp + 0x5c
   172     // |  empty slot   |      (only if parameter size is even)
   173     // +---------------+
   174     // |               |
   175     // .  parameters   .
   176     // |               |
   177     // +---------------+ <--- fp + 0
   178     // |               |
   179     // . reg save area .
   180     // |               |
   181     // +---------------+ <--- fp + 0x40
   182     // |               |
   183     // . extra 7 slots .
   184     // |               |
   185     // +---------------+ <--- fp + 0x5c
   186     // |  param. size  |
   187     // +---------------+ <--- fp + 0x60
   188     // |    thread     |
   189     // +---------------+
   190     // |               |
   192     // pass parameters if any
   193     BLOCK_COMMENT("pass parameters if any");
   194     { const Register src = parameters.as_in().as_register();
   195       const Register dst = Lentry_args;
   196       const Register tmp = G3_scratch;
   197       const Register cnt = G4_scratch;
   199       // test if any parameters & setup of Lentry_args
   200       Label exit;
   201       __ ld_ptr(parameter_size.as_in().as_address(), cnt);      // parameter counter
   202       __ add( FP, STACK_BIAS, dst );
   203       __ cmp_zero_and_br(Assembler::zero, cnt, exit);
   204       __ delayed()->sub(dst, BytesPerWord, dst);                 // setup Lentry_args
   206       // copy parameters if any
   207       Label loop;
   208       __ BIND(loop);
   209       // Store parameter value
   210       __ ld_ptr(src, 0, tmp);
   211       __ add(src, BytesPerWord, src);
   212       __ st_ptr(tmp, dst, 0);
   213       __ deccc(cnt);
   214       __ br(Assembler::greater, false, Assembler::pt, loop);
   215       __ delayed()->sub(dst, Interpreter::stackElementSize, dst);
   217       // done
   218       __ BIND(exit);
   219     }
   221     // setup parameters, method & call Java function
   222 #ifdef ASSERT
   223     // layout_activation_impl checks its notion of saved SP against
   224     // this register, so if this changes update it as well.
   225     const Register saved_SP = Lscratch;
   226     __ mov(SP, saved_SP);                               // keep track of SP before call
   227 #endif
   229     // setup parameters
   230     const Register t = G3_scratch;
   231     __ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words)
   232     __ sll(t, Interpreter::logStackElementSize, t);    // compute number of bytes
   233     __ sub(FP, t, Gargs);                              // setup parameter pointer
   234 #ifdef _LP64
   235     __ add( Gargs, STACK_BIAS, Gargs );                // Account for LP64 stack bias
   236 #endif
   237     __ mov(SP, O5_savedSP);
   240     // do the call
   241     //
   242     // the following registers must be set up:
   243     //
   244     // G2_thread
   245     // G5_method
   246     // Gargs
   247     BLOCK_COMMENT("call Java function");
   248     __ jmpl(entry_point.as_in().as_register(), G0, O7);
   249     __ delayed()->mov(method.as_in().as_register(), G5_method);   // setup method
   251     BLOCK_COMMENT("call_stub_return_address:");
   252     return_pc = __ pc();
   254     // The callee, if it wasn't interpreted, can return with SP changed so
   255     // we can no longer assert that SP is unchanged.
   257     // store result depending on type
   258     // (everything that is not T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE
   259     //  is treated as T_INT)
   260     { const Register addr = result     .as_in().as_register();
   261       const Register type = result_type.as_in().as_register();
   262       Label is_long, is_float, is_double, is_object, exit;
   263       __            cmp(type, T_OBJECT);  __ br(Assembler::equal, false, Assembler::pn, is_object);
   264       __ delayed()->cmp(type, T_FLOAT);   __ br(Assembler::equal, false, Assembler::pn, is_float);
   265       __ delayed()->cmp(type, T_DOUBLE);  __ br(Assembler::equal, false, Assembler::pn, is_double);
   266       __ delayed()->cmp(type, T_LONG);    __ br(Assembler::equal, false, Assembler::pn, is_long);
   267       __ delayed()->nop();
   269       // store int result
   270       __ st(O0, addr, G0);
   272       __ BIND(exit);
   273       __ ret();
   274       __ delayed()->restore();
   276       __ BIND(is_object);
   277       __ ba(exit);
   278       __ delayed()->st_ptr(O0, addr, G0);
   280       __ BIND(is_float);
   281       __ ba(exit);
   282       __ delayed()->stf(FloatRegisterImpl::S, F0, addr, G0);
   284       __ BIND(is_double);
   285       __ ba(exit);
   286       __ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0);
   288       __ BIND(is_long);
   289 #ifdef _LP64
   290       __ ba(exit);
   291       __ delayed()->st_long(O0, addr, G0);      // store entire long
   292 #else
   293 #if defined(COMPILER2)
   294   // All return values are where we want them, except for Longs.  C2 returns
   295   // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
   296   // Since the interpreter will return longs in both G1 and O0/O1 in the 32-bit
   297   // build, we simply always use G1.
   298   // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
   299   // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
   300   // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
   302       __ ba(exit);
   303       __ delayed()->stx(G1, addr, G0);  // store entire long
   304 #else
   305       __ st(O1, addr, BytesPerInt);
   306       __ ba(exit);
   307       __ delayed()->st(O0, addr, G0);
   308 #endif /* COMPILER2 */
   309 #endif /* _LP64 */
   310      }
   311      return start;
   312   }
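For reference, the incoming-argument list above corresponds to the C++-side
call-stub function pointer the VM invokes (the authoritative typedef lives in
stubRoutines.hpp, not in this file); a rough sketch:

    // Sketch of the C++-side view of this stub (illustrative; see stubRoutines.hpp):
    typedef void (*CallStub)(
      address   link,               // o0: call wrapper address
      intptr_t* result,             // o1: where to store the result
      BasicType result_type,        // o2: T_INT, T_LONG, T_FLOAT, T_DOUBLE, T_OBJECT, ...
      Method*   method,             // o3: callee Method*
      address   entry_point,        // o4: (interpreter) entry point
      intptr_t* parameters,         // o5: parameter area
      int       size_of_parameters, // [sp + 0x5c]: parameter size (in words)
      TRAPS);                       // [sp + 0x60]: current thread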
   315   //----------------------------------------------------------------------------------------------------
   316   // Return point for a Java call if there's an exception thrown in Java code.
   317   // The exception is caught and transformed into a pending exception stored in
   318   // JavaThread that can be tested from within the VM.
   319   //
   320   // Oexception: exception oop
   322   address generate_catch_exception() {
   323     StubCodeMark mark(this, "StubRoutines", "catch_exception");
   325     address start = __ pc();
   326     // verify that thread corresponds
   327     __ verify_thread();
   329     const Register& temp_reg = Gtemp;
   330     Address pending_exception_addr    (G2_thread, Thread::pending_exception_offset());
   331     Address exception_file_offset_addr(G2_thread, Thread::exception_file_offset   ());
   332     Address exception_line_offset_addr(G2_thread, Thread::exception_line_offset   ());
   334     // set pending exception
   335     __ verify_oop(Oexception);
   336     __ st_ptr(Oexception, pending_exception_addr);
   337     __ set((intptr_t)__FILE__, temp_reg);
   338     __ st_ptr(temp_reg, exception_file_offset_addr);
   339     __ set((intptr_t)__LINE__, temp_reg);
   340     __ st(temp_reg, exception_line_offset_addr);
   342     // complete return to VM
   343     assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
   345     AddressLiteral stub_ret(StubRoutines::_call_stub_return_address);
   346     __ jump_to(stub_ret, temp_reg);
   347     __ delayed()->nop();
   349     return start;
   350   }
   353   //----------------------------------------------------------------------------------------------------
   354   // Continuation point for runtime calls returning with a pending exception
   355   // The pending exception check happened in the runtime or native call stub
   356   // The pending exception in Thread is converted into a Java-level exception
   357   //
   358   // Contract with Java-level exception handler: O0 = exception
   359   //                                             O1 = throwing pc
   361   address generate_forward_exception() {
   362     StubCodeMark mark(this, "StubRoutines", "forward_exception");
   363     address start = __ pc();
   365     // Upon entry, O7 has the return address returning into Java
   366     // (interpreted or compiled) code; i.e. the return address
   367     // becomes the throwing pc.
   369     const Register& handler_reg = Gtemp;
   371     Address exception_addr(G2_thread, Thread::pending_exception_offset());
   373 #ifdef ASSERT
   374     // make sure that this code is only executed if there is a pending exception
   375     { Label L;
   376       __ ld_ptr(exception_addr, Gtemp);
   377       __ br_notnull_short(Gtemp, Assembler::pt, L);
   378       __ stop("StubRoutines::forward exception: no pending exception (1)");
   379       __ bind(L);
   380     }
   381 #endif
   383     // compute exception handler into handler_reg
   384     __ get_thread();
   385     __ ld_ptr(exception_addr, Oexception);
   386     __ verify_oop(Oexception);
   387     __ save_frame(0);             // compensates for compiler weakness
   388     __ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC
   389     BLOCK_COMMENT("call exception_handler_for_return_address");
   390     __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), G2_thread, Lscratch);
   391     __ mov(O0, handler_reg);
   392     __ restore();                 // compensates for compiler weakness
   394     __ ld_ptr(exception_addr, Oexception);
   395     __ add(O7, frame::pc_return_offset, Oissuing_pc); // save the issuing PC
   397 #ifdef ASSERT
   398     // make sure exception is set
   399     { Label L;
   400       __ br_notnull_short(Oexception, Assembler::pt, L);
   401       __ stop("StubRoutines::forward exception: no pending exception (2)");
   402       __ bind(L);
   403     }
   404 #endif
   405     // jump to exception handler
   406     __ jmp(handler_reg, 0);
   407     // clear pending exception
   408     __ delayed()->st_ptr(G0, exception_addr);
   410     return start;
   411   }
   413   // Safefetch stubs.
   414   void generate_safefetch(const char* name, int size, address* entry,
   415                           address* fault_pc, address* continuation_pc) {
   416     // safefetch signatures:
   417     //   int      SafeFetch32(int*      adr, int      errValue);
   418     //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
   419     //
   420     // arguments:
   421     //   o0 = adr
   422     //   o1 = errValue
   423     //
   424     // result:
   425     //   o0  = *adr or errValue
   427     StubCodeMark mark(this, "StubRoutines", name);
   429     // Entry point, pc or function descriptor.
   430     __ align(CodeEntryAlignment);
   431     *entry = __ pc();
   433     __ mov(O0, G1);  // g1 = o0
   434     __ mov(O1, O0);  // o0 = o1
   435     // Load *adr into O0, may fault.
   436     *fault_pc = __ pc();
   437     switch (size) {
   438       case 4:
   439         // int32_t
   440         __ ldsw(G1, 0, O0);  // o0 = [g1]
   441         break;
   442       case 8:
   443         // int64_t
   444         __ ldx(G1, 0, O0);   // o0 = [g1]
   445         break;
   446       default:
   447         ShouldNotReachHere();
   448     }
   450     // return errValue or *adr
   451     *continuation_pc = __ pc();
   452     // By convention with the trap handler we ensure there is a non-CTI
   453     // instruction in the trap shadow.
   454     __ nop();
   455     __ retl();
   456     __ delayed()->nop();
   457   }
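These stubs back the SafeFetch32/SafeFetchN entry points declared in
stubRoutines.hpp; a sketch of typical use, assuming the probed address may be
unmapped:

    // Illustrative use (sketch): probe memory without risking a crash.  If the
    // load at *fault_pc traps, the trap handler resumes at *continuation_pc and
    // the errValue passed in O1 is returned instead of the loaded value.
    int probe(int* maybe_unmapped) {
      const int err = 0xBADF00D;
      return SafeFetch32(maybe_unmapped, err);  // returns err if the load faulted
    }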
   459   //------------------------------------------------------------------------------------------------------------------------
   460   // Continuation point for throwing of implicit exceptions that are not handled in
   461   // the current activation. Fabricates an exception oop and initiates normal
   462   // exception dispatching in this frame. Only callee-saved registers are preserved
   463   // (through the normal register window / RegisterMap handling).
   464   // If the compiler needs all registers to be preserved between the fault
   465   // point and the exception handler then it must assume responsibility for that in
   466   // AbstractCompiler::continuation_for_implicit_null_exception or
   467   // continuation_for_implicit_division_by_zero_exception. All other implicit
   468   // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
   469   // either at call sites or otherwise assume that stack unwinding will be initiated,
   470   // so caller saved registers were assumed volatile in the compiler.
   472   // Note that we generate only this stub into a RuntimeStub, because it needs to be
   473   // properly traversed and ignored during GC, so we change the meaning of the "__"
   474   // macro within this method.
   475 #undef __
   476 #define __ masm->
   478   address generate_throw_exception(const char* name, address runtime_entry,
   479                                    Register arg1 = noreg, Register arg2 = noreg) {
   480 #ifdef ASSERT
   481     int insts_size = VerifyThread ? 1 * K : 600;
   482 #else
   483     int insts_size = VerifyThread ? 1 * K : 256;
   484 #endif /* ASSERT */
   485     int locs_size  = 32;
   487     CodeBuffer      code(name, insts_size, locs_size);
   488     MacroAssembler* masm = new MacroAssembler(&code);
   490     __ verify_thread();
   492     // This is an inlined and slightly modified version of call_VM
   493     // which has the ability to fetch the return PC out of thread-local storage
   494     __ assert_not_delayed();
   496     // Note that we always push a frame because on the SPARC
   497     // architecture, for all of our implicit exception kinds at call
   498     // sites, the implicit exception is taken before the callee frame
   499     // is pushed.
   500     __ save_frame(0);
   502     int frame_complete = __ offset();
   504     // Note that we always have a runtime stub frame on the top of stack by this point
   505     Register last_java_sp = SP;
   506     // 64-bit last_java_sp is biased!
   507     __ set_last_Java_frame(last_java_sp, G0);
   508     if (VerifyThread)  __ mov(G2_thread, O0); // about to be smashed; pass early
   509     __ save_thread(noreg);
   510     if (arg1 != noreg) {
   511       assert(arg2 != O1, "clobbered");
   512       __ mov(arg1, O1);
   513     }
   514     if (arg2 != noreg) {
   515       __ mov(arg2, O2);
   516     }
   517     // do the call
   518     BLOCK_COMMENT("call runtime_entry");
   519     __ call(runtime_entry, relocInfo::runtime_call_type);
   520     if (!VerifyThread)
   521       __ delayed()->mov(G2_thread, O0);  // pass thread as first argument
   522     else
   523       __ delayed()->nop();             // (thread already passed)
   524     __ restore_thread(noreg);
   525     __ reset_last_Java_frame();
   527     // check for pending exceptions. use Gtemp as scratch register.
   528 #ifdef ASSERT
   529     Label L;
   531     Address exception_addr(G2_thread, Thread::pending_exception_offset());
   532     Register scratch_reg = Gtemp;
   533     __ ld_ptr(exception_addr, scratch_reg);
   534     __ br_notnull_short(scratch_reg, Assembler::pt, L);
   535     __ should_not_reach_here();
   536     __ bind(L);
   537 #endif // ASSERT
   538     BLOCK_COMMENT("call forward_exception_entry");
   539     __ call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
   540     // we use O7 linkage so that forward_exception_entry has the issuing PC
   541     __ delayed()->restore();
   543     RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, masm->total_frame_size_in_bytes(0), NULL, false);
   544     return stub->entry_point();
   545   }
   547 #undef __
   548 #define __ _masm->
   551   // Generate a routine that sets all the registers so we
   552   // can tell if the stop routine prints them correctly.
   553   address generate_test_stop() {
   554     StubCodeMark mark(this, "StubRoutines", "test_stop");
   555     address start = __ pc();
   557     int i;
   559     __ save_frame(0);
   561     static jfloat zero = 0.0, one = 1.0;
   563     // put addr in L0, then load through L0 to F0
   564     __ set((intptr_t)&zero, L0);  __ ldf( FloatRegisterImpl::S, L0, 0, F0);
   565     __ set((intptr_t)&one,  L0);  __ ldf( FloatRegisterImpl::S, L0, 0, F1); // 1.0 to F1
   567     // use add to put 2..18 in F2..F18
   568     for ( i = 2;  i <= 18;  ++i ) {
   569       __ fadd( FloatRegisterImpl::S, F1, as_FloatRegister(i-1),  as_FloatRegister(i));
   570     }
   572     // Now put double 2 in F16, double 18 in F18
   573     __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F2, F16 );
   574     __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F18, F18 );
   576     // use add to put 20..32 in F20..F32
   577     for (i = 20; i < 32; i += 2) {
   578       __ fadd( FloatRegisterImpl::D, F16, as_FloatRegister(i-2),  as_FloatRegister(i));
   579     }
   581     // put 0..7 in i's, 8..15 in l's, 16..23 in o's, 24..31 in g's
   582     for ( i = 0; i < 8; ++i ) {
   583       if (i < 6) {
   584         __ set(     i, as_iRegister(i));
   585         __ set(16 + i, as_oRegister(i));
   586         __ set(24 + i, as_gRegister(i));
   587       }
   588       __ set( 8 + i, as_lRegister(i));
   589     }
   591     __ stop("testing stop");
   594     __ ret();
   595     __ delayed()->restore();
   597     return start;
   598   }
   601   address generate_stop_subroutine() {
   602     StubCodeMark mark(this, "StubRoutines", "stop_subroutine");
   603     address start = __ pc();
   605     __ stop_subroutine();
   607     return start;
   608   }
   610   address generate_flush_callers_register_windows() {
   611     StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
   612     address start = __ pc();
   614     __ flushw();
   615     __ retl(false);
   616     __ delayed()->add( FP, STACK_BIAS, O0 );
   617     // The returned value must be a stack pointer whose register save area
   618     // is flushed, and will stay flushed while the caller executes.
   620     return start;
   621   }
   623   // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
   624   //
   625   // Arguments:
   626   //
   627   //      exchange_value: O0
   628   //      dest:           O1
   629   //
   630   // Results:
   631   //
   632   //     O0: the value previously stored in dest
   633   //
   634   address generate_atomic_xchg() {
   635     StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
   636     address start = __ pc();
   638     if (UseCASForSwap) {
   639       // Use CAS instead of swap, just in case the MP hardware
   640       // prefers to work with just one kind of synch. instruction.
   641       Label retry;
   642       __ BIND(retry);
   643       __ mov(O0, O3);       // scratch copy of exchange value
   644       __ ld(O1, 0, O2);     // observe the previous value
   645       // try to replace O2 with O3
   646       __ cas(O1, O2, O3);
   647       __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
   649       __ retl(false);
   650       __ delayed()->mov(O2, O0);  // report previous value to caller
   651     } else {
   652       __ retl(false);
   653       __ delayed()->swap(O1, 0, O0);
   654     }
   656     return start;
   657   }
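Functionally, the CAS path above is the familiar exchange loop; a portable
sketch (illustrative only, not how the VM itself reaches the stub):

    // CAS-based exchange, as in the retry loop above:
    jint atomic_xchg_sketch(jint exchange_value, volatile jint* dest) {
      jint observed;
      do {
        observed = *dest;                                                      // ld: observe previous value
      } while (Atomic::cmpxchg(exchange_value, dest, observed) != observed);   // cas, retry on failure
      return observed;                                                         // previous value (returned in O0)
    }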
   660   // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
   661   //
   662   // Arguments:
   663   //
   664   //      exchange_value: O0
   665   //      dest:           O1
   666   //      compare_value:  O2
   667   //
   668   // Results:
   669   //
   670   //     O0: the value previously stored in dest
   671   //
   672   address generate_atomic_cmpxchg() {
   673     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
   674     address start = __ pc();
   676     // cmpxchg(dest, compare_value, exchange_value)
   677     __ cas(O1, O2, O0);
   678     __ retl(false);
   679     __ delayed()->nop();
   681     return start;
   682   }
   684   // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
   685   //
   686   // Arguments:
   687   //
   688   //      exchange_value: O1:O0
   689   //      dest:           O2
   690   //      compare_value:  O4:O3
   691   //
   692   // Results:
   693   //
   694   //     O1:O0: the value previously stored in dest
   695   //
   696   // Overwrites: G1,G2,G3
   697   //
   698   address generate_atomic_cmpxchg_long() {
   699     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
   700     address start = __ pc();
   702     __ sllx(O0, 32, O0);
   703     __ srl(O1, 0, O1);
   704     __ or3(O0,O1,O0);      // O0 holds 64-bit value of exchange_value
   705     __ sllx(O3, 32, O3);
   706     __ srl(O4, 0, O4);
   707     __ or3(O3,O4,O3);     // O3 holds 64-bit value of compare_value
   708     __ casx(O2, O3, O0);
   709     __ srl(O0, 0, O1);    // unpacked return value in O1:O0
   710     __ retl(false);
   711     __ delayed()->srlx(O0, 32, O0);
   713     return start;
   714   }
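The shifts around casx just pack the 32-bit argument halves into single 64-bit
registers and unpack the result again; the same arithmetic as a sketch:

    // Packing done by sllx/srl/or3 above, and unpacking done before return
    // (illustrative only):
    jlong pack64(jint hi, jint lo) { return ((jlong)hi << 32) | ((jlong)lo & 0xFFFFFFFFL); }
    jint  low32 (jlong v)          { return (jint)v;         }  // srl  O0,  0, O1
    jint  high32(jlong v)          { return (jint)(v >> 32); }  // srlx O0, 32, O0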
   717   // Support for jint Atomic::add(jint add_value, volatile jint* dest).
   718   //
   719   // Arguments:
   720   //
   721   //      add_value: O0   (e.g., +1 or -1)
   722   //      dest:      O1
   723   //
   724   // Results:
   725   //
   726   //     O0: the new value stored in dest
   727   //
   728   // Overwrites: O3
   729   //
   730   address generate_atomic_add() {
   731     StubCodeMark mark(this, "StubRoutines", "atomic_add");
   732     address start = __ pc();
   733     __ BIND(_atomic_add_stub);
   735     Label(retry);
   736     __ BIND(retry);
   738     __ lduw(O1, 0, O2);
   739     __ add(O0, O2, O3);
   740     __ cas(O1, O2, O3);
   741     __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
   742     __ retl(false);
   743     __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
   745     return start;
   746   }
   747   Label _atomic_add_stub;  // called from other stubs
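The add stub follows the same retry pattern as the exchange stub; a portable
sketch of its effect:

    // Load, add, CAS, retry until the update sticks; return the new value.
    jint atomic_add_sketch(jint add_value, volatile jint* dest) {
      jint old, updated;
      do {
        old     = *dest;                                     // lduw
        updated = old + add_value;                           // add
      } while (Atomic::cmpxchg(updated, dest, old) != old);  // cas, retry on failure
      return updated;                                        // new value (returned in O0)
    }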
   750   //------------------------------------------------------------------------------------------------------------------------
   751   // The following routine generates a subroutine to throw an asynchronous
   752   // UnknownError when an unsafe access gets a fault that could not be
   753   // reasonably prevented by the programmer.  (Example: SIGBUS/OBJERR.)
   754   //
   755   // Arguments :
   756   //
   757   //      trapping PC:    O7
   758   //
   759   // Results:
   760   //     posts an asynchronous exception, skips the trapping instruction
   761   //
   763   address generate_handler_for_unsafe_access() {
   764     StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
   765     address start = __ pc();
   767     const int preserve_register_words = (64 * 2);
   768     Address preserve_addr(FP, (-preserve_register_words * wordSize) + STACK_BIAS);
   770     Register Lthread = L7_thread_cache;
   771     int i;
   773     __ save_frame(0);
   774     __ mov(G1, L1);
   775     __ mov(G2, L2);
   776     __ mov(G3, L3);
   777     __ mov(G4, L4);
   778     __ mov(G5, L5);
   779     for (i = 0; i < 64; i += 2) {
   780       __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
   781     }
   783     address entry_point = CAST_FROM_FN_PTR(address, handle_unsafe_access);
   784     BLOCK_COMMENT("call handle_unsafe_access");
   785     __ call(entry_point, relocInfo::runtime_call_type);
   786     __ delayed()->nop();
   788     __ mov(L1, G1);
   789     __ mov(L2, G2);
   790     __ mov(L3, G3);
   791     __ mov(L4, G4);
   792     __ mov(L5, G5);
   793     for (i = 0; i < 64; i += 2) {
   794       __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
   795     }
   797     __ verify_thread();
   799     __ jmp(O0, 0);
   800     __ delayed()->restore();
   802     return start;
   803   }
   806   // Support for uint StubRoutine::Sparc::partial_subtype_check( Klass sub, Klass super );
   807   // Arguments :
   808   //
   809   //      ret  : O0, returned
   810   //      icc/xcc: set as O0 (depending on wordSize)
   811   //      sub  : O1, argument, not changed
   812   //      super: O2, argument, not changed
   813   //      raddr: O7, blown by call
   814   address generate_partial_subtype_check() {
   815     __ align(CodeEntryAlignment);
   816     StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
   817     address start = __ pc();
   818     Label miss;
   820 #if defined(COMPILER2) && !defined(_LP64)
   821     // Do not use a 'save' because it blows the 64-bit O registers.
   822     __ add(SP,-4*wordSize,SP);  // Make space for 4 temps (stack must be 2 words aligned)
   823     __ st_ptr(L0,SP,(frame::register_save_words+0)*wordSize);
   824     __ st_ptr(L1,SP,(frame::register_save_words+1)*wordSize);
   825     __ st_ptr(L2,SP,(frame::register_save_words+2)*wordSize);
   826     __ st_ptr(L3,SP,(frame::register_save_words+3)*wordSize);
   827     Register Rret   = O0;
   828     Register Rsub   = O1;
   829     Register Rsuper = O2;
   830 #else
   831     __ save_frame(0);
   832     Register Rret   = I0;
   833     Register Rsub   = I1;
   834     Register Rsuper = I2;
   835 #endif
   837     Register L0_ary_len = L0;
   838     Register L1_ary_ptr = L1;
   839     Register L2_super   = L2;
   840     Register L3_index   = L3;
   842     __ check_klass_subtype_slow_path(Rsub, Rsuper,
   843                                      L0, L1, L2, L3,
   844                                      NULL, &miss);
   846     // Match falls through here.
   847     __ addcc(G0,0,Rret);        // set Z flags, Z result
   849 #if defined(COMPILER2) && !defined(_LP64)
   850     __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
   851     __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
   852     __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
   853     __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
   854     __ retl();                  // Result in Rret is zero; flags set to Z
   855     __ delayed()->add(SP,4*wordSize,SP);
   856 #else
   857     __ ret();                   // Result in Rret is zero; flags set to Z
   858     __ delayed()->restore();
   859 #endif
   861     __ BIND(miss);
   862     __ addcc(G0,1,Rret);        // set NZ flags, NZ result
   864 #if defined(COMPILER2) && !defined(_LP64)
   865     __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
   866     __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
   867     __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
   868     __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
   869     __ retl();                  // Result in Rret is != 0; flags set to NZ
   870     __ delayed()->add(SP,4*wordSize,SP);
   871 #else
   872     __ ret();                   // Result in Rret is != 0; flags set to NZ
   873     __ delayed()->restore();
   874 #endif
   876     return start;
   877   }
   880   // Called from MacroAssembler::verify_oop
   881   //
   882   address generate_verify_oop_subroutine() {
   883     StubCodeMark mark(this, "StubRoutines", "verify_oop_stub");
   885     address start = __ pc();
   887     __ verify_oop_subroutine();
   889     return start;
   890   }
   893   //
   894   // Verify that a register contains a clean 32-bit positive value
   895   // (high 32 bits are 0) so it can be used in 64-bit shifts (sllx, srax).
   896   //
   897   //  Input:
   898   //    Rint  -  32-bits value
   899   //    Rtmp  -  scratch
   900   //
   901   void assert_clean_int(Register Rint, Register Rtmp) {
   902 #if defined(ASSERT) && defined(_LP64)
   903     __ signx(Rint, Rtmp);
   904     __ cmp(Rint, Rtmp);
   905     __ breakpoint_trap(Assembler::notEqual, Assembler::xcc);
   906 #endif
   907   }
   909   //
   910   //  Generate overlap test for array copy stubs
   911   //
   912   //  Input:
   913   //    O0    -  array1
   914   //    O1    -  array2
   915   //    O2    -  element count
   916   //
   917   //  Kills temps:  O3, O4
   918   //
   919   void array_overlap_test(address no_overlap_target, int log2_elem_size) {
   920     assert(no_overlap_target != NULL, "must be generated");
   921     array_overlap_test(no_overlap_target, NULL, log2_elem_size);
   922   }
   923   void array_overlap_test(Label& L_no_overlap, int log2_elem_size) {
   924     array_overlap_test(NULL, &L_no_overlap, log2_elem_size);
   925   }
   926   void array_overlap_test(address no_overlap_target, Label* NOLp, int log2_elem_size) {
   927     const Register from       = O0;
   928     const Register to         = O1;
   929     const Register count      = O2;
   930     const Register to_from    = O3; // to - from
   931     const Register byte_count = O4; // count << log2_elem_size
   933       __ subcc(to, from, to_from);
   934       __ sll_ptr(count, log2_elem_size, byte_count);
   935       if (NOLp == NULL)
   936         __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, no_overlap_target);
   937       else
   938         __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp));
   939       __ delayed()->cmp(to_from, byte_count);
   940       if (NOLp == NULL)
   941         __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, no_overlap_target);
   942       else
   943         __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, (*NOLp));
   944       __ delayed()->nop();
   945   }
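The two branches implement the usual disjointness test for a forward copy; the
same condition as a C++ sketch (the unsigned compares are what let a single
subtraction cover both cases):

    // Forward copy is safe when the destination does not land inside the
    // part of the source region that is still to be read.
    bool forward_copy_is_safe(uintptr_t from, uintptr_t to, size_t byte_count) {
      return (to <= from) || (to - from >= byte_count);
    }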
   947   //
   948   //  Generate pre-write barrier for array.
   949   //
   950   //  Input:
   951   //     addr     - register containing starting address
   952   //     count    - register containing element count
   953   //     tmp      - scratch register
   954   //
   955   //  The input registers are overwritten.
   956   //
   957   void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
   958     BarrierSet* bs = Universe::heap()->barrier_set();
   959     switch (bs->kind()) {
   960       case BarrierSet::G1SATBCT:
   961       case BarrierSet::G1SATBCTLogging:
   962         // With G1, don't generate the call if we statically know that the target is uninitialized
   963         if (!dest_uninitialized) {
   964           __ save_frame(0);
   965           // Save the necessary global regs... will be used after.
   966           if (addr->is_global()) {
   967             __ mov(addr, L0);
   968           }
   969           if (count->is_global()) {
   970             __ mov(count, L1);
   971           }
   972           __ mov(addr->after_save(), O0);
   973           // Get the count into O1
   974           __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
   975           __ delayed()->mov(count->after_save(), O1);
   976           if (addr->is_global()) {
   977             __ mov(L0, addr);
   978           }
   979           if (count->is_global()) {
   980             __ mov(L1, count);
   981           }
   982           __ restore();
   983         }
   984         break;
   985       case BarrierSet::CardTableModRef:
   986       case BarrierSet::CardTableExtension:
   987       case BarrierSet::ModRef:
   988         break;
   989       default:
   990         ShouldNotReachHere();
   991     }
   992   }
   993   //
   994   //  Generate post-write barrier for array.
   995   //
   996   //  Input:
   997   //     addr     - register containing starting address
   998   //     count    - register containing element count
   999   //     tmp      - scratch register
  1000   //
  1001   //  The input registers are overwritten.
  1002   //
  1003   void gen_write_ref_array_post_barrier(Register addr, Register count,
  1004                                         Register tmp) {
  1005     BarrierSet* bs = Universe::heap()->barrier_set();
  1007     switch (bs->kind()) {
  1008       case BarrierSet::G1SATBCT:
   1009       case BarrierSet::G1SATBCTLogging:
   1010         {
  1011           // Get some new fresh output registers.
  1012           __ save_frame(0);
  1013           __ mov(addr->after_save(), O0);
  1014           __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post));
  1015           __ delayed()->mov(count->after_save(), O1);
   1016           __ restore();
   1017         }
  1018         break;
  1019       case BarrierSet::CardTableModRef:
   1020       case BarrierSet::CardTableExtension:
   1021         {
  1022           CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  1023           assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
  1024           assert_different_registers(addr, count, tmp);
  1026           Label L_loop;
  1028           __ sll_ptr(count, LogBytesPerHeapOop, count);
  1029           __ sub(count, BytesPerHeapOop, count);
  1030           __ add(count, addr, count);
  1031           // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
  1032           __ srl_ptr(addr, CardTableModRefBS::card_shift, addr);
  1033           __ srl_ptr(count, CardTableModRefBS::card_shift, count);
  1034           __ sub(count, addr, count);
  1035           AddressLiteral rs(ct->byte_map_base);
  1036           __ set(rs, tmp);
  1037         __ BIND(L_loop);
  1038           __ stb(G0, tmp, addr);
  1039           __ subcc(count, 1, count);
  1040           __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
   1041           __ delayed()->add(addr, 1, addr);
   1042         }
  1043         break;
  1044       case BarrierSet::ModRef:
  1045         break;
  1046       default:
   1047         ShouldNotReachHere();
   1048     }
   1049   }
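For the card-table cases, the loop above simply dirties every card spanned by
the stored-to range; a C++ sketch, assuming the usual CardTableModRefBS layout
(dirty_card is 0, which is why the stb stores G0):

    // Dirty one card byte per covered card (illustrative only).
    void dirty_cards_sketch(jbyte* byte_map_base, int card_shift,
                            uintptr_t addr, size_t count_in_oops) {
      uintptr_t last   = addr + count_in_oops * BytesPerHeapOop - BytesPerHeapOop;
      jbyte* card      = byte_map_base + (addr >> card_shift);
      jbyte* last_card = byte_map_base + (last >> card_shift);
      for (; card <= last_card; card++) {
        *card = 0;   // CardTableModRefBS::dirty_card
      }
    }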
  1051   //
  1052   // Generate main code for disjoint arraycopy
  1053   //
  1054   typedef void (StubGenerator::*CopyLoopFunc)(Register from, Register to, Register count, int count_dec,
  1055                                               Label& L_loop, bool use_prefetch, bool use_bis);
  1057   void disjoint_copy_core(Register from, Register to, Register count, int log2_elem_size,
  1058                           int iter_size, StubGenerator::CopyLoopFunc copy_loop_func) {
  1059     Label L_copy;
  1061     assert(log2_elem_size <= 3, "the following code should be changed");
  1062     int count_dec = 16>>log2_elem_size;
  1064     int prefetch_dist = MAX2(ArraycopySrcPrefetchDistance, ArraycopyDstPrefetchDistance);
  1065     assert(prefetch_dist < 4096, "invalid value");
  1066     prefetch_dist = (prefetch_dist + (iter_size-1)) & (-iter_size); // round up to one iteration copy size
  1067     int prefetch_count = (prefetch_dist >> log2_elem_size); // elements count
  1069     if (UseBlockCopy) {
  1070       Label L_block_copy, L_block_copy_prefetch, L_skip_block_copy;
  1072       // 64 bytes tail + bytes copied in one loop iteration
  1073       int tail_size = 64 + iter_size;
  1074       int block_copy_count = (MAX2(tail_size, (int)BlockCopyLowLimit)) >> log2_elem_size;
  1075       // Use BIS copy only for big arrays since it requires membar.
  1076       __ set(block_copy_count, O4);
  1077       __ cmp_and_br_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_skip_block_copy);
  1078       // This code is for disjoint source and destination:
  1079       //   to <= from || to >= from+count
  1080       // but BIS will stomp over 'from' if (to > from-tail_size && to <= from)
  1081       __ sub(from, to, O4);
   1082       __ srax(O4, 4, O4); // divide by 16 since the following short branch has only 5 bits for imm.
  1083       __ cmp_and_br_short(O4, (tail_size>>4), Assembler::lessEqualUnsigned, Assembler::pn, L_skip_block_copy);
  1085       __ wrasi(G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
  1086       // BIS should not be used to copy tail (64 bytes+iter_size)
  1087       // to avoid zeroing of following values.
  1088       __ sub(count, (tail_size>>log2_elem_size), count); // count is still positive >= 0
  1090       if (prefetch_count > 0) { // rounded up to one iteration count
  1091         // Do prefetching only if copy size is bigger
  1092         // than prefetch distance.
  1093         __ set(prefetch_count, O4);
  1094         __ cmp_and_brx_short(count, O4, Assembler::less, Assembler::pt, L_block_copy);
  1095         __ sub(count, prefetch_count, count);
  1097         (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy_prefetch, true, true);
  1098         __ add(count, prefetch_count, count); // restore count
  1100       } // prefetch_count > 0
  1102       (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy, false, true);
  1103       __ add(count, (tail_size>>log2_elem_size), count); // restore count
  1105       __ wrasi(G0, Assembler::ASI_PRIMARY_NOFAULT);
  1106       // BIS needs membar.
  1107       __ membar(Assembler::StoreLoad);
  1108       // Copy tail
  1109       __ ba_short(L_copy);
  1111       __ BIND(L_skip_block_copy);
  1112     } // UseBlockCopy
  1114     if (prefetch_count > 0) { // rounded up to one iteration count
  1115       // Do prefetching only if copy size is bigger
  1116       // than prefetch distance.
  1117       __ set(prefetch_count, O4);
  1118       __ cmp_and_brx_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_copy);
  1119       __ sub(count, prefetch_count, count);
  1121       Label L_copy_prefetch;
  1122       (this->*copy_loop_func)(from, to, count, count_dec, L_copy_prefetch, true, false);
  1123       __ add(count, prefetch_count, count); // restore count
  1125     } // prefetch_count > 0
   1127     (this->*copy_loop_func)(from, to, count, count_dec, L_copy, false, false);
   1128   }
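A quick worked example of the prefetch_dist rounding at the top of
disjoint_copy_core: with iter_size == 16 and a prefetch distance of 100 bytes,

    // prefetch_dist  = (100 + 15) & -16  ==> 112 bytes (one whole iteration)
    // prefetch_count = 112 >> log2_elem_size, e.g. 112 elements for a byte copy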
  1132   //
  1133   // Helper methods for copy_16_bytes_forward_with_shift()
  1134   //
  1135   void copy_16_bytes_shift_loop(Register from, Register to, Register count, int count_dec,
  1136                                 Label& L_loop, bool use_prefetch, bool use_bis) {
  1138     const Register left_shift  = G1; // left  shift bit counter
  1139     const Register right_shift = G5; // right shift bit counter
  1141     __ align(OptoLoopAlignment);
  1142     __ BIND(L_loop);
  1143     if (use_prefetch) {
  1144       if (ArraycopySrcPrefetchDistance > 0) {
   1145         __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
   1146       }
  1147       if (ArraycopyDstPrefetchDistance > 0) {
   1148         __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
   1149       }
   1150     }
  1151     __ ldx(from, 0, O4);
  1152     __ ldx(from, 8, G4);
  1153     __ inc(to, 16);
  1154     __ inc(from, 16);
  1155     __ deccc(count, count_dec); // Can we do next iteration after this one?
  1156     __ srlx(O4, right_shift, G3);
  1157     __ bset(G3, O3);
  1158     __ sllx(O4, left_shift,  O4);
  1159     __ srlx(G4, right_shift, G3);
  1160     __ bset(G3, O4);
  1161     if (use_bis) {
  1162       __ stxa(O3, to, -16);
  1163       __ stxa(O4, to, -8);
  1164     } else {
  1165       __ stx(O3, to, -16);
   1166       __ stx(O4, to, -8);
   1167     }
  1168     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
   1169     __ delayed()->sllx(G4, left_shift,  O3);
   1170   }
  1172   // Copy big chunks forward with shift
  1173   //
  1174   // Inputs:
  1175   //   from      - source arrays
  1176   //   to        - destination array aligned to 8-bytes
  1177   //   count     - elements count to copy >= the count equivalent to 16 bytes
  1178   //   count_dec - elements count's decrement equivalent to 16 bytes
  1179   //   L_copy_bytes - copy exit label
  1180   //
  1181   void copy_16_bytes_forward_with_shift(Register from, Register to,
  1182                      Register count, int log2_elem_size, Label& L_copy_bytes) {
  1183     Label L_aligned_copy, L_copy_last_bytes;
  1184     assert(log2_elem_size <= 3, "the following code should be changed");
  1185     int count_dec = 16>>log2_elem_size;
  1187     // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
  1188     __ andcc(from, 7, G1); // misaligned bytes
  1189     __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
  1190     __ delayed()->nop();
  1192     const Register left_shift  = G1; // left  shift bit counter
  1193     const Register right_shift = G5; // right shift bit counter
  1195     __ sll(G1, LogBitsPerByte, left_shift);
  1196     __ mov(64, right_shift);
  1197     __ sub(right_shift, left_shift, right_shift);
  1199     //
  1200     // Load 2 aligned 8-bytes chunks and use one from previous iteration
  1201     // to form 2 aligned 8-bytes chunks to store.
  1202     //
  1203     __ dec(count, count_dec);   // Pre-decrement 'count'
  1204     __ andn(from, 7, from);     // Align address
  1205     __ ldx(from, 0, O3);
  1206     __ inc(from, 8);
  1207     __ sllx(O3, left_shift,  O3);
  1209     disjoint_copy_core(from, to, count, log2_elem_size, 16, &StubGenerator::copy_16_bytes_shift_loop);
  1211     __ inccc(count, count_dec>>1 ); // + 8 bytes
  1212     __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
  1213     __ delayed()->inc(count, count_dec>>1); // restore 'count'
  1215     // copy 8 bytes, part of them already loaded in O3
  1216     __ ldx(from, 0, O4);
  1217     __ inc(to, 8);
  1218     __ inc(from, 8);
  1219     __ srlx(O4, right_shift, G3);
  1220     __ bset(O3, G3);
  1221     __ stx(G3, to, -8);
  1223     __ BIND(L_copy_last_bytes);
  1224     __ srl(right_shift, LogBitsPerByte, right_shift); // misaligned bytes
  1225     __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
  1226     __ delayed()->sub(from, right_shift, from);       // restore address
   1228     __ BIND(L_aligned_copy);
   1229   }
  1231   // Copy big chunks backward with shift
  1232   //
  1233   // Inputs:
  1234   //   end_from  - source arrays end address
  1235   //   end_to    - destination array end address aligned to 8-bytes
  1236   //   count     - elements count to copy >= the count equivalent to 16 bytes
  1237   //   count_dec - elements count's decrement equivalent to 16 bytes
  1238   //   L_aligned_copy - aligned copy exit label
  1239   //   L_copy_bytes   - copy exit label
  1240   //
  1241   void copy_16_bytes_backward_with_shift(Register end_from, Register end_to,
  1242                      Register count, int count_dec,
  1243                      Label& L_aligned_copy, Label& L_copy_bytes) {
  1244     Label L_loop, L_copy_last_bytes;
  1246     // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
  1247       __ andcc(end_from, 7, G1); // misaligned bytes
  1248       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
  1249       __ delayed()->deccc(count, count_dec); // Pre-decrement 'count'
  1251     const Register left_shift  = G1; // left  shift bit counter
  1252     const Register right_shift = G5; // right shift bit counter
  1254       __ sll(G1, LogBitsPerByte, left_shift);
  1255       __ mov(64, right_shift);
  1256       __ sub(right_shift, left_shift, right_shift);
  1258     //
  1259     // Load 2 aligned 8-bytes chunks and use one from previous iteration
  1260     // to form 2 aligned 8-bytes chunks to store.
  1261     //
  1262       __ andn(end_from, 7, end_from);     // Align address
  1263       __ ldx(end_from, 0, O3);
  1264       __ align(OptoLoopAlignment);
  1265     __ BIND(L_loop);
  1266       __ ldx(end_from, -8, O4);
  1267       __ deccc(count, count_dec); // Can we do next iteration after this one?
  1268       __ ldx(end_from, -16, G4);
  1269       __ dec(end_to, 16);
  1270       __ dec(end_from, 16);
  1271       __ srlx(O3, right_shift, O3);
  1272       __ sllx(O4, left_shift,  G3);
  1273       __ bset(G3, O3);
  1274       __ stx(O3, end_to, 8);
  1275       __ srlx(O4, right_shift, O4);
  1276       __ sllx(G4, left_shift,  G3);
  1277       __ bset(G3, O4);
  1278       __ stx(O4, end_to, 0);
  1279       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
  1280       __ delayed()->mov(G4, O3);
  1282       __ inccc(count, count_dec>>1 ); // + 8 bytes
  1283       __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
  1284       __ delayed()->inc(count, count_dec>>1); // restore 'count'
  1286       // copy 8 bytes, part of them already loaded in O3
  1287       __ ldx(end_from, -8, O4);
  1288       __ dec(end_to, 8);
  1289       __ dec(end_from, 8);
  1290       __ srlx(O3, right_shift, O3);
  1291       __ sllx(O4, left_shift,  G3);
  1292       __ bset(O3, G3);
  1293       __ stx(G3, end_to, 0);
  1295     __ BIND(L_copy_last_bytes);
  1296       __ srl(left_shift, LogBitsPerByte, left_shift);    // misaligned bytes
  1297       __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
   1298       __ delayed()->add(end_from, left_shift, end_from); // restore address
   1299   }
  1301   //
  1302   //  Generate stub for disjoint byte copy.  If "aligned" is true, the
  1303   //  "from" and "to" addresses are assumed to be heapword aligned.
  1304   //
  1305   // Arguments for generated stub:
  1306   //      from:  O0
  1307   //      to:    O1
  1308   //      count: O2 treated as signed
  1309   //
  1310   address generate_disjoint_byte_copy(bool aligned, address *entry, const char *name) {
  1311     __ align(CodeEntryAlignment);
  1312     StubCodeMark mark(this, "StubRoutines", name);
  1313     address start = __ pc();
  1315     Label L_skip_alignment, L_align;
  1316     Label L_copy_byte, L_copy_byte_loop, L_exit;
  1318     const Register from      = O0;   // source array address
  1319     const Register to        = O1;   // destination array address
  1320     const Register count     = O2;   // elements count
  1321     const Register offset    = O5;   // offset from start of arrays
  1322     // O3, O4, G3, G4 are used as temp registers
  1324     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
  1326     if (entry != NULL) {
  1327       *entry = __ pc();
  1328       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
  1329       BLOCK_COMMENT("Entry:");
  1332     // for short arrays, just do single element copy
  1333     __ cmp(count, 23); // 16 + 7
  1334     __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
  1335     __ delayed()->mov(G0, offset);
  1337     if (aligned) {
  1338       // 'aligned' == true when it is known statically during compilation
  1339       // of this arraycopy call site that both 'from' and 'to' addresses
  1340       // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
  1341       //
  1342       // Aligned arrays have 4 bytes alignment in 32-bits VM
  1343       // and 8 bytes - in 64-bits VM. So we do it only for 32-bits VM
  1344       //
  1345 #ifndef _LP64
  1346       // copy a 4-bytes word if necessary to align 'to' to 8 bytes
  1347       __ andcc(to, 7, G0);
  1348       __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment);
  1349       __ delayed()->ld(from, 0, O3);
  1350       __ inc(from, 4);
  1351       __ inc(to, 4);
  1352       __ dec(count, 4);
  1353       __ st(O3, to, -4);
  1354     __ BIND(L_skip_alignment);
  1355 #endif
  1356     } else {
  1357       // copy bytes to align 'to' on 8 byte boundary
  1358       __ andcc(to, 7, G1); // misaligned bytes
  1359       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
  1360       __ delayed()->neg(G1);
  1361       __ inc(G1, 8);       // bytes need to copy to next 8-bytes alignment
  1362       __ sub(count, G1, count);
  1363     __ BIND(L_align);
  1364       __ ldub(from, 0, O3);
  1365       __ deccc(G1);
  1366       __ inc(from);
  1367       __ stb(O3, to, 0);
  1368       __ br(Assembler::notZero, false, Assembler::pt, L_align);
  1369       __ delayed()->inc(to);
   1370     __ BIND(L_skip_alignment);
   1371     }
  1372 #ifdef _LP64
  1373     if (!aligned)
   1374 #endif
   1375     {
  1376       // Copy with shift 16 bytes per iteration if arrays do not have
  1377       // the same alignment mod 8, otherwise fall through to the next
  1378       // code for aligned copy.
   1379       // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
  1380       // Also jump over aligned copy after the copy with shift completed.
   1382       copy_16_bytes_forward_with_shift(from, to, count, 0, L_copy_byte);
   1383     }
   1385     // Both arrays are 8 bytes aligned, copy 16 bytes at a time
  1386       __ and3(count, 7, G4); // Save count
  1387       __ srl(count, 3, count);
  1388      generate_disjoint_long_copy_core(aligned);
  1389       __ mov(G4, count);     // Restore count
  1391     // copy tailing bytes
  1392     __ BIND(L_copy_byte);
  1393       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
  1394       __ align(OptoLoopAlignment);
  1395     __ BIND(L_copy_byte_loop);
  1396       __ ldub(from, offset, O3);
  1397       __ deccc(count);
  1398       __ stb(O3, to, offset);
  1399       __ brx(Assembler::notZero, false, Assembler::pt, L_copy_byte_loop);
  1400       __ delayed()->inc(offset);
  1402     __ BIND(L_exit);
  1403       // O3, O4 are used as temp registers
  1404       inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
  1405       __ retl();
  1406       __ delayed()->mov(G0, O0); // return 0
   1407     return start;
   1408   }
  1410   //
  1411   //  Generate stub for conjoint byte copy.  If "aligned" is true, the
  1412   //  "from" and "to" addresses are assumed to be heapword aligned.
  1413   //
  1414   // Arguments for generated stub:
  1415   //      from:  O0
  1416   //      to:    O1
  1417   //      count: O2 treated as signed
  1418   //
  1419   address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
  1420                                       address *entry, const char *name) {
  1421     // Do reverse copy.
  1423     __ align(CodeEntryAlignment);
  1424     StubCodeMark mark(this, "StubRoutines", name);
  1425     address start = __ pc();
  1427     Label L_skip_alignment, L_align, L_aligned_copy;
  1428     Label L_copy_byte, L_copy_byte_loop, L_exit;
  1430     const Register from      = O0;   // source array address
  1431     const Register to        = O1;   // destination array address
  1432     const Register count     = O2;   // elements count
  1433     const Register end_from  = from; // source array end address
  1434     const Register end_to    = to;   // destination array end address
  1436     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
  1438     if (entry != NULL) {
  1439       *entry = __ pc();
  1440       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
  1441       BLOCK_COMMENT("Entry:");
  1444     array_overlap_test(nooverlap_target, 0);
  1446     __ add(to, count, end_to);       // offset after last copied element
  1448     // for short arrays, just do single element copy
  1449     __ cmp(count, 23); // 16 + 7
  1450     __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
  1451     __ delayed()->add(from, count, end_from);
  1454       // Align the ends of the arrays since they may be unaligned even
  1455       // when the arrays themselves are aligned.
  1457       // copy bytes to align 'end_to' on 8 byte boundary
  1458       __ andcc(end_to, 7, G1); // misaligned bytes
  1459       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
  1460       __ delayed()->nop();
  1461       __ sub(count, G1, count);
  1462     __ BIND(L_align);
  1463       __ dec(end_from);
  1464       __ dec(end_to);
  1465       __ ldub(end_from, 0, O3);
  1466       __ deccc(G1);
  1467       __ brx(Assembler::notZero, false, Assembler::pt, L_align);
  1468       __ delayed()->stb(O3, end_to, 0);
  1469     __ BIND(L_skip_alignment);
  1471 #ifdef _LP64
  1472     if (aligned) {
  1473       // Both arrays are aligned to 8-bytes in 64-bits VM.
  1474       // The 'count' is decremented in copy_16_bytes_backward_with_shift()
  1475       // in unaligned case.
  1476       __ dec(count, 16);
  1477     } else
  1478 #endif
  1480       // Copy with shift 16 bytes per iteration if arrays do not have
  1481       // the same alignment mod 8, otherwise jump to the next
  1482       // code for aligned copy (subtracting 16 from 'count' before the jump).
  1483       // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
  1484       // Also jump over aligned copy after the copy with shift completed.
  1486       copy_16_bytes_backward_with_shift(end_from, end_to, count, 16,
  1487                                         L_aligned_copy, L_copy_byte);
  1489     // copy 4 elements (16 bytes) at a time
  1490       __ align(OptoLoopAlignment);
  1491     __ BIND(L_aligned_copy);
  1492       __ dec(end_from, 16);
  1493       __ ldx(end_from, 8, O3);
  1494       __ ldx(end_from, 0, O4);
  1495       __ dec(end_to, 16);
  1496       __ deccc(count, 16);
  1497       __ stx(O3, end_to, 8);
  1498       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
  1499       __ delayed()->stx(O4, end_to, 0);
  1500       __ inc(count, 16);
  1502     // copy 1 element (1 byte) at a time
  1503     __ BIND(L_copy_byte);
  1504       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
  1505       __ align(OptoLoopAlignment);
  1506     __ BIND(L_copy_byte_loop);
  1507       __ dec(end_from);
  1508       __ dec(end_to);
  1509       __ ldub(end_from, 0, O4);
  1510       __ deccc(count);
  1511       __ brx(Assembler::greater, false, Assembler::pt, L_copy_byte_loop);
  1512       __ delayed()->stb(O4, end_to, 0);
  1514     __ BIND(L_exit);
  1515     // O3, O4 are used as temp registers
  1516     inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
  1517     __ retl();
  1518     __ delayed()->mov(G0, O0); // return 0
  1519     return start;
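  // Illustrative C-level sketch of the conjoint (overlapping) byte copy: the
  // stub copies from the highest address down so a destination that starts
  // inside the source range is still filled correctly.  The real code first
  // aligns 'end_to' to 8 bytes and then moves 16-byte chunks; only the copy
  // direction is modelled here, and the name is illustration only.
  static void conjoint_byte_copy_model(const jbyte* from, jbyte* to, size_t count) {
    const jbyte* end_from = from + count;       // one past the last source byte
    jbyte*       end_to   = to   + count;       // one past the last destination byte
    while (count > 0) {
      *--end_to = *--end_from;                  // copy backwards, byte by byte
      count--;
    }
  }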
  1522   //
  1523   //  Generate stub for disjoint short copy.  If "aligned" is true, the
  1524   //  "from" and "to" addresses are assumed to be heapword aligned.
  1525   //
  1526   // Arguments for generated stub:
  1527   //      from:  O0
  1528   //      to:    O1
  1529   //      count: O2 treated as signed
  1530   //
  1531   address generate_disjoint_short_copy(bool aligned, address *entry, const char * name) {
  1532     __ align(CodeEntryAlignment);
  1533     StubCodeMark mark(this, "StubRoutines", name);
  1534     address start = __ pc();
  1536     Label L_skip_alignment, L_skip_alignment2;
  1537     Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
  1539     const Register from      = O0;   // source array address
  1540     const Register to        = O1;   // destination array address
  1541     const Register count     = O2;   // elements count
  1542     const Register offset    = O5;   // offset from start of arrays
  1543     // O3, O4, G3, G4 are used as temp registers
  1545     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
  1547     if (entry != NULL) {
  1548       *entry = __ pc();
  1549       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
  1550       BLOCK_COMMENT("Entry:");
  1553     // for short arrays, just do single element copy
  1554     __ cmp(count, 11); // 8 + 3  (22 bytes)
  1555     __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
  1556     __ delayed()->mov(G0, offset);
  1558     if (aligned) {
  1559       // 'aligned' == true when it is known statically during compilation
  1560       // of this arraycopy call site that both 'from' and 'to' addresses
  1561       // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
  1562       //
  1563       // Aligned arrays have 4-byte alignment in the 32-bit VM
  1564       // and 8-byte alignment in the 64-bit VM.
  1565       //
  1566 #ifndef _LP64
  1567       // copy a 2-element word if necessary to align 'to' to 8 bytes
  1568       __ andcc(to, 7, G0);
  1569       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
  1570       __ delayed()->ld(from, 0, O3);
  1571       __ inc(from, 4);
  1572       __ inc(to, 4);
  1573       __ dec(count, 2);
  1574       __ st(O3, to, -4);
  1575     __ BIND(L_skip_alignment);
  1576 #endif
  1577     } else {
  1578       // copy 1 element if necessary to align 'to' on a 4 byte boundary
  1579       __ andcc(to, 3, G0);
  1580       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
  1581       __ delayed()->lduh(from, 0, O3);
  1582       __ inc(from, 2);
  1583       __ inc(to, 2);
  1584       __ dec(count);
  1585       __ sth(O3, to, -2);
  1586     __ BIND(L_skip_alignment);
  1588       // copy 2 elements to align 'to' on an 8 byte boundary
  1589       __ andcc(to, 7, G0);
  1590       __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
  1591       __ delayed()->lduh(from, 0, O3);
  1592       __ dec(count, 2);
  1593       __ lduh(from, 2, O4);
  1594       __ inc(from, 4);
  1595       __ inc(to, 4);
  1596       __ sth(O3, to, -4);
  1597       __ sth(O4, to, -2);
  1598     __ BIND(L_skip_alignment2);
  1600 #ifdef _LP64
  1601     if (!aligned)
  1602 #endif
  1604       // Copy with shift 16 bytes per iteration if arrays do not have
  1605       // the same alignment mod 8, otherwise fall through to the next
  1606       // code for aligned copy.
  1607       // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
  1608       // Also jump over aligned copy after the copy with shift completed.
  1610       copy_16_bytes_forward_with_shift(from, to, count, 1, L_copy_2_bytes);
  1613     // Both arrays are 8-byte aligned, copy 16 bytes at a time
  1614       __ and3(count, 3, G4); // Save
  1615       __ srl(count, 2, count);
  1616      generate_disjoint_long_copy_core(aligned);
  1617       __ mov(G4, count); // restore
  1619     // copy 1 element at a time
  1620     __ BIND(L_copy_2_bytes);
  1621       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
  1622       __ align(OptoLoopAlignment);
  1623     __ BIND(L_copy_2_bytes_loop);
  1624       __ lduh(from, offset, O3);
  1625       __ deccc(count);
  1626       __ sth(O3, to, offset);
  1627       __ brx(Assembler::notZero, false, Assembler::pt, L_copy_2_bytes_loop);
  1628       __ delayed()->inc(offset, 2);
  1630     __ BIND(L_exit);
  1631       // O3, O4 are used as temp registers
  1632       inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
  1633       __ retl();
  1634       __ delayed()->mov(G0, O0); // return 0
  1635     return start;
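  // Illustrative C-level sketch of the disjoint short copy for the case where
  // alignment is not known statically: one element brings 'to' to a 4-byte
  // boundary, two more bring it to an 8-byte boundary, the bulk is then handed
  // to the long-copy core as count >> 2 8-byte words, and count & 3 elements
  // remain for the 2-byte tail loop.  The shift-and-merge path for mismatched
  // alignment is omitted; the name is illustration only.
  static void disjoint_short_copy_model(const jshort* from, jshort* to, size_t count) {
    if (count >= 11) {                               // same threshold as the stub (8 + 3)
      if (((uintptr_t)to & 3) != 0) {                // 1 element to reach 4-byte alignment
        *to++ = *from++; count--;
      }
      if (((uintptr_t)to & 7) != 0) {                // 2 elements to reach 8-byte alignment
        to[0] = from[0]; to[1] = from[1];
        to += 2; from += 2; count -= 2;
      }
      size_t words = count >> 2;                     // whole 8-byte words (4 jshorts each)
      for (size_t i = 0; i < words * 4; i++) to[i] = from[i];
      to += words * 4; from += words * 4; count &= 3;
    }
    while (count > 0) { *to++ = *from++; count--; }  // trailing elements
  }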
  1638   //
  1639   //  Generate stub for array fill (byte, short, or int elements).  If
  1640   //  "aligned" is true, the "to" address is assumed to be heapword aligned.
  1641   //
  1642   // Arguments for generated stub:
  1643   //      to:    O0
  1644   //      value: O1
  1645   //      count: O2 treated as signed
  1646   //
  1647   address generate_fill(BasicType t, bool aligned, const char* name) {
  1648     __ align(CodeEntryAlignment);
  1649     StubCodeMark mark(this, "StubRoutines", name);
  1650     address start = __ pc();
  1652     const Register to        = O0;   // destination array address
  1653     const Register value     = O1;   // fill value
  1654     const Register count     = O2;   // elements count
  1655     // O3 is used as a temp register
  1657     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
  1659     Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
  1660     Label L_fill_2_bytes, L_fill_elements, L_fill_32_bytes;
  1662     int shift = -1;
  1663     switch (t) {
  1664       case T_BYTE:
  1665         shift = 2;
  1666         break;
  1667       case T_SHORT:
  1668         shift = 1;
  1669         break;
  1670       case T_INT:
  1671         shift = 0;
  1672         break;
  1673       default: ShouldNotReachHere();
  1676     BLOCK_COMMENT("Entry:");
  1678     if (t == T_BYTE) {
  1679       // Zero extend value
  1680       __ and3(value, 0xff, value);
  1681       __ sllx(value, 8, O3);
  1682       __ or3(value, O3, value);
  1684     if (t == T_SHORT) {
  1685       // Zero extend value
  1686       __ sllx(value, 48, value);
  1687       __ srlx(value, 48, value);
  1689     if (t == T_BYTE || t == T_SHORT) {
  1690       __ sllx(value, 16, O3);
  1691       __ or3(value, O3, value);
  1694     __ cmp(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
  1695     __ brx(Assembler::lessUnsigned, false, Assembler::pn, L_fill_elements); // use unsigned cmp
  1696     __ delayed()->andcc(count, 1, G0);
  1698     if (!aligned && (t == T_BYTE || t == T_SHORT)) {
  1699       // align source address at 4 bytes address boundary
  1700       if (t == T_BYTE) {
  1701         // One byte misalignment happens only for byte arrays
  1702         __ andcc(to, 1, G0);
  1703         __ br(Assembler::zero, false, Assembler::pt, L_skip_align1);
  1704         __ delayed()->nop();
  1705         __ stb(value, to, 0);
  1706         __ inc(to, 1);
  1707         __ dec(count, 1);
  1708         __ BIND(L_skip_align1);
  1710       // Two bytes misalignment happens only for byte and short (char) arrays
  1711       __ andcc(to, 2, G0);
  1712       __ br(Assembler::zero, false, Assembler::pt, L_skip_align2);
  1713       __ delayed()->nop();
  1714       __ sth(value, to, 0);
  1715       __ inc(to, 2);
  1716       __ dec(count, 1 << (shift - 1));
  1717       __ BIND(L_skip_align2);
  1719 #ifdef _LP64
  1720     if (!aligned) {
  1721 #endif
  1722     // align to 8 bytes, we know we are 4 byte aligned to start
  1723     __ andcc(to, 7, G0);
  1724     __ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes);
  1725     __ delayed()->nop();
  1726     __ stw(value, to, 0);
  1727     __ inc(to, 4);
  1728     __ dec(count, 1 << shift);
  1729     __ BIND(L_fill_32_bytes);
  1730 #ifdef _LP64
  1732 #endif
  1734     if (t == T_INT) {
  1735       // Zero extend value
  1736       __ srl(value, 0, value);
  1738     if (t == T_BYTE || t == T_SHORT || t == T_INT) {
  1739       __ sllx(value, 32, O3);
  1740       __ or3(value, O3, value);
  1743     Label L_check_fill_8_bytes;
  1744     // Fill 32-byte chunks
  1745     __ subcc(count, 8 << shift, count);
  1746     __ brx(Assembler::less, false, Assembler::pt, L_check_fill_8_bytes);
  1747     __ delayed()->nop();
  1749     Label L_fill_32_bytes_loop, L_fill_4_bytes;
  1750     __ align(16);
  1751     __ BIND(L_fill_32_bytes_loop);
  1753     __ stx(value, to, 0);
  1754     __ stx(value, to, 8);
  1755     __ stx(value, to, 16);
  1756     __ stx(value, to, 24);
  1758     __ subcc(count, 8 << shift, count);
  1759     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_fill_32_bytes_loop);
  1760     __ delayed()->add(to, 32, to);
  1762     __ BIND(L_check_fill_8_bytes);
  1763     __ addcc(count, 8 << shift, count);
  1764     __ brx(Assembler::zero, false, Assembler::pn, L_exit);
  1765     __ delayed()->subcc(count, 1 << (shift + 1), count);
  1766     __ brx(Assembler::less, false, Assembler::pn, L_fill_4_bytes);
  1767     __ delayed()->andcc(count, 1<<shift, G0);
  1769     //
  1770     // length is too short, just fill 8 bytes at a time
  1771     //
  1772     Label L_fill_8_bytes_loop;
  1773     __ BIND(L_fill_8_bytes_loop);
  1774     __ stx(value, to, 0);
  1775     __ subcc(count, 1 << (shift + 1), count);
  1776     __ brx(Assembler::greaterEqual, false, Assembler::pn, L_fill_8_bytes_loop);
  1777     __ delayed()->add(to, 8, to);
  1779     // fill trailing 4 bytes
  1780     __ andcc(count, 1<<shift, G0);  // in delay slot of branches
  1781     if (t == T_INT) {
  1782       __ BIND(L_fill_elements);
  1784     __ BIND(L_fill_4_bytes);
  1785     __ brx(Assembler::zero, false, Assembler::pt, L_fill_2_bytes);
  1786     if (t == T_BYTE || t == T_SHORT) {
  1787       __ delayed()->andcc(count, 1<<(shift-1), G0);
  1788     } else {
  1789       __ delayed()->nop();
  1791     __ stw(value, to, 0);
  1792     if (t == T_BYTE || t == T_SHORT) {
  1793       __ inc(to, 4);
  1794       // fill trailing 2 bytes
  1795       __ andcc(count, 1<<(shift-1), G0); // in delay slot of branches
  1796       __ BIND(L_fill_2_bytes);
  1797       __ brx(Assembler::zero, false, Assembler::pt, L_fill_byte);
  1798       __ delayed()->andcc(count, 1, count);
  1799       __ sth(value, to, 0);
  1800       if (t == T_BYTE) {
  1801         __ inc(to, 2);
  1802         // fill trailing byte
  1803         __ andcc(count, 1, count);  // in delay slot of branches
  1804         __ BIND(L_fill_byte);
  1805         __ brx(Assembler::zero, false, Assembler::pt, L_exit);
  1806         __ delayed()->nop();
  1807         __ stb(value, to, 0);
  1808       } else {
  1809         __ BIND(L_fill_byte);
  1811     } else {
  1812       __ BIND(L_fill_2_bytes);
  1814     __ BIND(L_exit);
  1815     __ retl();
  1816     __ delayed()->nop();
  1818     // Handle fills of less than 8 bytes.  Int is handled elsewhere.
  1819     if (t == T_BYTE) {
  1820       __ BIND(L_fill_elements);
  1821       Label L_fill_2, L_fill_4;
  1822       // in delay slot __ andcc(count, 1, G0);
  1823       __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
  1824       __ delayed()->andcc(count, 2, G0);
  1825       __ stb(value, to, 0);
  1826       __ inc(to, 1);
  1827       __ BIND(L_fill_2);
  1828       __ brx(Assembler::zero, false, Assembler::pt, L_fill_4);
  1829       __ delayed()->andcc(count, 4, G0);
  1830       __ stb(value, to, 0);
  1831       __ stb(value, to, 1);
  1832       __ inc(to, 2);
  1833       __ BIND(L_fill_4);
  1834       __ brx(Assembler::zero, false, Assembler::pt, L_exit);
  1835       __ delayed()->nop();
  1836       __ stb(value, to, 0);
  1837       __ stb(value, to, 1);
  1838       __ stb(value, to, 2);
  1839       __ retl();
  1840       __ delayed()->stb(value, to, 3);
  1843     if (t == T_SHORT) {
  1844       Label L_fill_2;
  1845       __ BIND(L_fill_elements);
  1846       // in delay slot __ andcc(count, 1, G0);
  1847       __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
  1848       __ delayed()->andcc(count, 2, G0);
  1849       __ sth(value, to, 0);
  1850       __ inc(to, 2);
  1851       __ BIND(L_fill_2);
  1852       __ brx(Assembler::zero, false, Assembler::pt, L_exit);
  1853       __ delayed()->nop();
  1854       __ sth(value, to, 0);
  1855       __ retl();
  1856       __ delayed()->sth(value, to, 2);
  1858     return start;
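  // Illustrative C-level sketch of the value replication done by the fill stub
  // above: the element value is widened into a 64-bit pattern so that the wide
  // stores write the value into every lane.  The helper name is illustration
  // only; the masking mirrors the and3/sllx/srlx sequences in the stub.
  static julong replicate_fill_value_model(BasicType t, julong value) {
    if (t == T_BYTE) {
      value &= 0xff;
      value |= value << 8;          // byte  -> 16-bit pattern
    }
    if (t == T_SHORT) {
      value &= 0xffff;              // zero-extend the 16-bit value
    }
    if (t == T_BYTE || t == T_SHORT) {
      value |= value << 16;         // 16-bit -> 32-bit pattern
    }
    if (t == T_INT) {
      value &= (julong)0xffffffff;  // zero-extend the 32-bit value
    }
    value |= value << 32;           // 32-bit -> 64-bit pattern
    return value;
  }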
  1861   //
  1862   //  Generate stub for conjoint short copy.  If "aligned" is true, the
  1863   //  "from" and "to" addresses are assumed to be heapword aligned.
  1864   //
  1865   // Arguments for generated stub:
  1866   //      from:  O0
  1867   //      to:    O1
  1868   //      count: O2 treated as signed
  1869   //
  1870   address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
  1871                                        address *entry, const char *name) {
  1872     // Do reverse copy.
  1874     __ align(CodeEntryAlignment);
  1875     StubCodeMark mark(this, "StubRoutines", name);
  1876     address start = __ pc();
  1878     Label L_skip_alignment, L_skip_alignment2, L_aligned_copy;
  1879     Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
  1881     const Register from      = O0;   // source array address
  1882     const Register to        = O1;   // destination array address
  1883     const Register count     = O2;   // elements count
  1884     const Register end_from  = from; // source array end address
  1885     const Register end_to    = to;   // destination array end address
  1887     const Register byte_count = O3;  // bytes count to copy
  1889     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
  1891     if (entry != NULL) {
  1892       *entry = __ pc();
  1893       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
  1894       BLOCK_COMMENT("Entry:");
  1897     array_overlap_test(nooverlap_target, 1);
  1899     __ sllx(count, LogBytesPerShort, byte_count);
  1900     __ add(to, byte_count, end_to);  // offset after last copied element
  1902     // for short arrays, just do single element copy
  1903     __ cmp(count, 11); // 8 + 3  (22 bytes)
  1904     __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
  1905     __ delayed()->add(from, byte_count, end_from);
  1908       // Align the ends of the arrays since they may be unaligned even
  1909       // when the arrays themselves are aligned.
  1911       // copy 1 element if necessary to align 'end_to' on a 4 byte boundary
  1912       __ andcc(end_to, 3, G0);
  1913       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
  1914       __ delayed()->lduh(end_from, -2, O3);
  1915       __ dec(end_from, 2);
  1916       __ dec(end_to, 2);
  1917       __ dec(count);
  1918       __ sth(O3, end_to, 0);
  1919     __ BIND(L_skip_alignment);
  1921       // copy 2 elements to align 'end_to' on an 8 byte boundary
  1922       __ andcc(end_to, 7, G0);
  1923       __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
  1924       __ delayed()->lduh(end_from, -2, O3);
  1925       __ dec(count, 2);
  1926       __ lduh(end_from, -4, O4);
  1927       __ dec(end_from, 4);
  1928       __ dec(end_to, 4);
  1929       __ sth(O3, end_to, 2);
  1930       __ sth(O4, end_to, 0);
  1931     __ BIND(L_skip_alignment2);
  1933 #ifdef _LP64
  1934     if (aligned) {
  1935       // Both arrays are aligned to 8-bytes in 64-bits VM.
  1936       // The 'count' is decremented in copy_16_bytes_backward_with_shift()
  1937       // in unaligned case.
  1938       __ dec(count, 8);
  1939     } else
  1940 #endif
  1942       // Copy with shift 16 bytes per iteration if arrays do not have
  1943       // the same alignment mod 8, otherwise jump to the next
  1944       // code for aligned copy (subtracting 8 from 'count' before the jump).
  1945       // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
  1946       // Also jump over aligned copy after the copy with shift completed.
  1948       copy_16_bytes_backward_with_shift(end_from, end_to, count, 8,
  1949                                         L_aligned_copy, L_copy_2_bytes);
  1951     // copy 4 elements (16 bytes) at a time
  1952       __ align(OptoLoopAlignment);
  1953     __ BIND(L_aligned_copy);
  1954       __ dec(end_from, 16);
  1955       __ ldx(end_from, 8, O3);
  1956       __ ldx(end_from, 0, O4);
  1957       __ dec(end_to, 16);
  1958       __ deccc(count, 8);
  1959       __ stx(O3, end_to, 8);
  1960       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
  1961       __ delayed()->stx(O4, end_to, 0);
  1962       __ inc(count, 8);
  1964     // copy 1 element (2 bytes) at a time
  1965     __ BIND(L_copy_2_bytes);
  1966       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
  1967     __ BIND(L_copy_2_bytes_loop);
  1968       __ dec(end_from, 2);
  1969       __ dec(end_to, 2);
  1970       __ lduh(end_from, 0, O4);
  1971       __ deccc(count);
  1972       __ brx(Assembler::greater, false, Assembler::pt, L_copy_2_bytes_loop);
  1973       __ delayed()->sth(O4, end_to, 0);
  1975     __ BIND(L_exit);
  1976     // O3, O4 are used as temp registers
  1977     inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
  1978     __ retl();
  1979     __ delayed()->mov(G0, O0); // return 0
  1980     return start;
  1983   //
  1984   // Helper methods for generate_disjoint_int_copy_core()
  1985   //
  1986   void copy_16_bytes_loop(Register from, Register to, Register count, int count_dec,
  1987                           Label& L_loop, bool use_prefetch, bool use_bis) {
  1989     __ align(OptoLoopAlignment);
  1990     __ BIND(L_loop);
  1991     if (use_prefetch) {
  1992       if (ArraycopySrcPrefetchDistance > 0) {
  1993         __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
  1995       if (ArraycopyDstPrefetchDistance > 0) {
  1996         __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
  1999     __ ldx(from, 4, O4);
  2000     __ ldx(from, 12, G4);
  2001     __ inc(to, 16);
  2002     __ inc(from, 16);
  2003     __ deccc(count, 4); // Can we do next iteration after this one?
  2005     __ srlx(O4, 32, G3);
  2006     __ bset(G3, O3);
  2007     __ sllx(O4, 32, O4);
  2008     __ srlx(G4, 32, G3);
  2009     __ bset(G3, O4);
  2010     if (use_bis) {
  2011       __ stxa(O3, to, -16);
  2012       __ stxa(O4, to, -8);
  2013     } else {
  2014       __ stx(O3, to, -16);
  2015       __ stx(O4, to, -8);
  2017     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
  2018     __ delayed()->sllx(G4, 32,  O3);
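  // Illustrative C-level sketch of the loop above for the int-copy case where
  // 'to' has been aligned to 8 bytes but 'from' is only 4-byte aligned: every
  // iteration does two aligned 8-byte loads and recombines their halves so the
  // stores stay aligned (big-endian, as on SPARC).  'carry_hi' holds the 4
  // source bytes preceding 'from_aligned', already shifted into the upper half
  // of a 64-bit word; the names are illustration only.
  static void copy_16_bytes_with_shift_model(const julong* from_aligned, julong* to_aligned,
                                             size_t iterations, julong carry_hi) {
    for (size_t i = 0; i < iterations; i++) {
      julong a = from_aligned[2 * i];                 // aligned load #1
      julong b = from_aligned[2 * i + 1];             // aligned load #2
      to_aligned[2 * i]     = carry_hi | (a >> 32);   // carried half + upper half of a
      to_aligned[2 * i + 1] = (a << 32) | (b >> 32);  // lower half of a + upper half of b
      carry_hi = b << 32;                             // carry b's lower half forward
    }
  }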
  2022   //
  2023   //  Generate core code for disjoint int copy (and oop copy on 32-bit).
  2024   //  If "aligned" is true, the "from" and "to" addresses are assumed
  2025   //  to be heapword aligned.
  2026   //
  2027   // Arguments:
  2028   //      from:  O0
  2029   //      to:    O1
  2030   //      count: O2 treated as signed
  2031   //
  2032   void generate_disjoint_int_copy_core(bool aligned) {
  2034     Label L_skip_alignment, L_aligned_copy;
  2035     Label L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
  2037     const Register from      = O0;   // source array address
  2038     const Register to        = O1;   // destination array address
  2039     const Register count     = O2;   // elements count
  2040     const Register offset    = O5;   // offset from start of arrays
  2041     // O3, O4, G3, G4 are used as temp registers
  2043     // 'aligned' == true when it is known statically during compilation
  2044     // of this arraycopy call site that both 'from' and 'to' addresses
  2045     // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
  2046     //
  2047     // Aligned arrays have 4-byte alignment in the 32-bit VM
  2048     // and 8-byte alignment in the 64-bit VM.
  2049     //
  2050 #ifdef _LP64
  2051     if (!aligned)
  2052 #endif
  2054       // The next check could be put under 'ifndef' since the code in
  2055       // generate_disjoint_long_copy_core() has its own checks and sets 'offset'.
  2057       // for short arrays, just do single element copy
  2058       __ cmp(count, 5); // 4 + 1 (20 bytes)
  2059       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
  2060       __ delayed()->mov(G0, offset);
  2062       // copy 1 element to align 'to' on an 8 byte boundary
  2063       __ andcc(to, 7, G0);
  2064       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
  2065       __ delayed()->ld(from, 0, O3);
  2066       __ inc(from, 4);
  2067       __ inc(to, 4);
  2068       __ dec(count);
  2069       __ st(O3, to, -4);
  2070     __ BIND(L_skip_alignment);
  2072     // if arrays have the same alignment mod 8, do a 4-element copy
  2073       __ andcc(from, 7, G0);
  2074       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
  2075       __ delayed()->ld(from, 0, O3);
  2077     //
  2078     // Load 2 aligned 8-byte chunks and use one from the previous iteration
  2079     // to form 2 aligned 8-byte chunks to store.
  2080     //
  2081     // copy_16_bytes_forward_with_shift() is not used here since this
  2082     // code is more efficient.
  2084     // copy with shift 4 elements (16 bytes) at a time
  2085       __ dec(count, 4);   // The cmp at the beginning guarantees count >= 4
  2086       __ sllx(O3, 32,  O3);
  2088       disjoint_copy_core(from, to, count, 2, 16, &StubGenerator::copy_16_bytes_loop);
  2090       __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
  2091       __ delayed()->inc(count, 4); // restore 'count'
  2093     __ BIND(L_aligned_copy);
  2094     } // !aligned
  2096     // copy 4 elements (16 bytes) at a time
  2097       __ and3(count, 1, G4); // Save
  2098       __ srl(count, 1, count);
  2099      generate_disjoint_long_copy_core(aligned);
  2100       __ mov(G4, count);     // Restore
  2102     // copy 1 element at a time
  2103     __ BIND(L_copy_4_bytes);
  2104       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
  2105     __ BIND(L_copy_4_bytes_loop);
  2106       __ ld(from, offset, O3);
  2107       __ deccc(count);
  2108       __ st(O3, to, offset);
  2109       __ brx(Assembler::notZero, false, Assembler::pt, L_copy_4_bytes_loop);
  2110       __ delayed()->inc(offset, 4);
  2111     __ BIND(L_exit);
  2114   //
  2115   //  Generate stub for disjoint int copy.  If "aligned" is true, the
  2116   //  "from" and "to" addresses are assumed to be heapword aligned.
  2117   //
  2118   // Arguments for generated stub:
  2119   //      from:  O0
  2120   //      to:    O1
  2121   //      count: O2 treated as signed
  2122   //
  2123   address generate_disjoint_int_copy(bool aligned, address *entry, const char *name) {
  2124     __ align(CodeEntryAlignment);
  2125     StubCodeMark mark(this, "StubRoutines", name);
  2126     address start = __ pc();
  2128     const Register count = O2;
  2129     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
  2131     if (entry != NULL) {
  2132       *entry = __ pc();
  2133       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
  2134       BLOCK_COMMENT("Entry:");
  2137     generate_disjoint_int_copy_core(aligned);
  2139     // O3, O4 are used as temp registers
  2140     inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
  2141     __ retl();
  2142     __ delayed()->mov(G0, O0); // return 0
  2143     return start;
  2146   //
  2147   //  Generate core code for conjoint int copy (and oop copy on 32-bit).
  2148   //  If "aligned" is true, the "from" and "to" addresses are assumed
  2149   //  to be heapword aligned.
  2150   //
  2151   // Arguments:
  2152   //      from:  O0
  2153   //      to:    O1
  2154   //      count: O2 treated as signed
  2155   //
  2156   void generate_conjoint_int_copy_core(bool aligned) {
  2157     // Do reverse copy.
  2159     Label L_skip_alignment, L_aligned_copy;
  2160     Label L_copy_16_bytes,  L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
  2162     const Register from      = O0;   // source array address
  2163     const Register to        = O1;   // destination array address
  2164     const Register count     = O2;   // elements count
  2165     const Register end_from  = from; // source array end address
  2166     const Register end_to    = to;   // destination array end address
  2167     // O3, O4, O5, G3 are used as temp registers
  2169     const Register byte_count = O3;  // bytes count to copy
  2171       __ sllx(count, LogBytesPerInt, byte_count);
  2172       __ add(to, byte_count, end_to); // offset after last copied element
  2174       __ cmp(count, 5); // for short arrays, just do single element copy
  2175       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
  2176       __ delayed()->add(from, byte_count, end_from);
  2178     // copy 1 element to align 'to' on an 8 byte boundary
  2179       __ andcc(end_to, 7, G0);
  2180       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
  2181       __ delayed()->nop();
  2182       __ dec(count);
  2183       __ dec(end_from, 4);
  2184       __ dec(end_to,   4);
  2185       __ ld(end_from, 0, O4);
  2186       __ st(O4, end_to, 0);
  2187     __ BIND(L_skip_alignment);
  2189     // Check if 'end_from' and 'end_to' have the same alignment.
  2190       __ andcc(end_from, 7, G0);
  2191       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
  2192       __ delayed()->dec(count, 4); // The cmp at the start guarantees count >= 4
  2194     // copy with shift 4 elements (16 bytes) at a time
  2195     //
  2196     // Load 2 aligned 8-byte chunks and use one from the previous iteration
  2197     // to form 2 aligned 8-byte chunks to store.
  2198     //
  2199       __ ldx(end_from, -4, O3);
  2200       __ align(OptoLoopAlignment);
  2201     __ BIND(L_copy_16_bytes);
  2202       __ ldx(end_from, -12, O4);
  2203       __ deccc(count, 4);
  2204       __ ldx(end_from, -20, O5);
  2205       __ dec(end_to, 16);
  2206       __ dec(end_from, 16);
  2207       __ srlx(O3, 32, O3);
  2208       __ sllx(O4, 32, G3);
  2209       __ bset(G3, O3);
  2210       __ stx(O3, end_to, 8);
  2211       __ srlx(O4, 32, O4);
  2212       __ sllx(O5, 32, G3);
  2213       __ bset(O4, G3);
  2214       __ stx(G3, end_to, 0);
  2215       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
  2216       __ delayed()->mov(O5, O3);
  2218       __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
  2219       __ delayed()->inc(count, 4);
  2221     // copy 4 elements (16 bytes) at a time
  2222       __ align(OptoLoopAlignment);
  2223     __ BIND(L_aligned_copy);
  2224       __ dec(end_from, 16);
  2225       __ ldx(end_from, 8, O3);
  2226       __ ldx(end_from, 0, O4);
  2227       __ dec(end_to, 16);
  2228       __ deccc(count, 4);
  2229       __ stx(O3, end_to, 8);
  2230       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
  2231       __ delayed()->stx(O4, end_to, 0);
  2232       __ inc(count, 4);
  2234     // copy 1 element (4 bytes) at a time
  2235     __ BIND(L_copy_4_bytes);
  2236       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
  2237     __ BIND(L_copy_4_bytes_loop);
  2238       __ dec(end_from, 4);
  2239       __ dec(end_to, 4);
  2240       __ ld(end_from, 0, O4);
  2241       __ deccc(count);
  2242       __ brx(Assembler::greater, false, Assembler::pt, L_copy_4_bytes_loop);
  2243       __ delayed()->st(O4, end_to, 0);
  2244     __ BIND(L_exit);
  2247   //
  2248   //  Generate stub for conjoint int copy.  If "aligned" is true, the
  2249   //  "from" and "to" addresses are assumed to be heapword aligned.
  2250   //
  2251   // Arguments for generated stub:
  2252   //      from:  O0
  2253   //      to:    O1
  2254   //      count: O2 treated as signed
  2255   //
  2256   address generate_conjoint_int_copy(bool aligned, address nooverlap_target,
  2257                                      address *entry, const char *name) {
  2258     __ align(CodeEntryAlignment);
  2259     StubCodeMark mark(this, "StubRoutines", name);
  2260     address start = __ pc();
  2262     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
  2264     if (entry != NULL) {
  2265       *entry = __ pc();
  2266       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
  2267       BLOCK_COMMENT("Entry:");
  2270     array_overlap_test(nooverlap_target, 2);
  2272     generate_conjoint_int_copy_core(aligned);
  2274     // O3, O4 are used as temp registers
  2275     inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
  2276     __ retl();
  2277     __ delayed()->mov(G0, O0); // return 0
  2278     return start;
  2281   //
  2282   // Helper methods for generate_disjoint_long_copy_core()
  2283   //
  2284   void copy_64_bytes_loop(Register from, Register to, Register count, int count_dec,
  2285                           Label& L_loop, bool use_prefetch, bool use_bis) {
  2286     __ align(OptoLoopAlignment);
  2287     __ BIND(L_loop);
  2288     for (int off = 0; off < 64; off += 16) {
  2289       if (use_prefetch && (off & 31) == 0) {
  2290         if (ArraycopySrcPrefetchDistance > 0) {
  2291           __ prefetch(from, ArraycopySrcPrefetchDistance+off, Assembler::severalReads);
  2293         if (ArraycopyDstPrefetchDistance > 0) {
  2294           __ prefetch(to, ArraycopyDstPrefetchDistance+off, Assembler::severalWritesAndPossiblyReads);
  2297       __ ldx(from,  off+0, O4);
  2298       __ ldx(from,  off+8, O5);
  2299       if (use_bis) {
  2300         __ stxa(O4, to,  off+0);
  2301         __ stxa(O5, to,  off+8);
  2302       } else {
  2303         __ stx(O4, to,  off+0);
  2304         __ stx(O5, to,  off+8);
  2307     __ deccc(count, 8);
  2308     __ inc(from, 64);
  2309     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
  2310     __ delayed()->inc(to, 64);
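  // Illustrative C-level sketch of one trip through the loop above: 64 bytes
  // move as four pairs of 8-byte loads and stores, with optional software
  // prefetches (ArraycopySrcPrefetchDistance / ArraycopyDstPrefetchDistance
  // bytes ahead) issued every 32 bytes; prefetches and the BIS store variant
  // are shown only as comments, and the name is illustration only.
  static void copy_64_bytes_iteration_model(const julong* from, julong* to) {
    for (int off = 0; off < 8; off += 2) {
      // a prefetch of from/to at the configured distance would be issued here
      julong lo = from[off];        // ldx  [from + off*8]
      julong hi = from[off + 1];    // ldx  [from + off*8 + 8]
      to[off]     = lo;             // stx (or stxa when block-init stores are in use)
      to[off + 1] = hi;
    }
  }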
  2313   //
  2314   //  Generate core code for disjoint long copy (and oop copy on 64-bit).
  2315   //  "aligned" is ignored, because we must make the stronger
  2316   //  assumption that both addresses are always 64-bit aligned.
  2317   //
  2318   // Arguments:
  2319   //      from:  O0
  2320   //      to:    O1
  2321   //      count: O2 treated as signed
  2322   //
  2323   // count -= 2;
  2324   // if ( count >= 0 ) { // >= 2 elements
  2325   //   if ( count > 6) { // >= 8 elements
  2326   //     count -= 6; // original count - 8
  2327   //     do {
  2328   //       copy_8_elements;
  2329   //       count -= 8;
  2330   //     } while ( count >= 0 );
  2331   //     count += 6;
  2332   //   }
  2333   //   if ( count >= 0 ) { // >= 2 elements
  2334   //     do {
  2335   //       copy_2_elements;
  2336   //     } while ( (count=count-2) >= 0 );
  2337   //   }
  2338   // }
  2339   // count += 2;
  2340   // if ( count != 0 ) { // 1 element left
  2341   //   copy_1_element;
  2342   // }
  2343   //
  2344   void generate_disjoint_long_copy_core(bool aligned) {
  2345     Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
  2346     const Register from    = O0;  // source array address
  2347     const Register to      = O1;  // destination array address
  2348     const Register count   = O2;  // elements count
  2349     const Register offset0 = O4;  // element offset
  2350     const Register offset8 = O5;  // next element offset
  2352     __ deccc(count, 2);
  2353     __ mov(G0, offset0);   // offset from start of arrays (0)
  2354     __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
  2355     __ delayed()->add(offset0, 8, offset8);
  2357     // Copy by 64-byte chunks
  2359     const Register from64 = O3;  // source address
  2360     const Register to64   = G3;  // destination address
  2361     __ subcc(count, 6, O3);
  2362     __ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes );
  2363     __ delayed()->mov(to,   to64);
  2364     // Now we can use O4(offset0), O5(offset8) as temps
  2365     __ mov(O3, count);
  2366     // count >= 0 (original count - 8)
  2367     __ mov(from, from64);
  2369     disjoint_copy_core(from64, to64, count, 3, 64, &StubGenerator::copy_64_bytes_loop);
  2371       // Restore O4(offset0), O5(offset8)
  2372       __ sub(from64, from, offset0);
  2373       __ inccc(count, 6); // restore count
  2374       __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
  2375       __ delayed()->add(offset0, 8, offset8);
  2377       // Copy by 16-byte chunks
  2378       __ align(OptoLoopAlignment);
  2379     __ BIND(L_copy_16_bytes);
  2380       __ ldx(from, offset0, O3);
  2381       __ ldx(from, offset8, G3);
  2382       __ deccc(count, 2);
  2383       __ stx(O3, to, offset0);
  2384       __ inc(offset0, 16);
  2385       __ stx(G3, to, offset8);
  2386       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
  2387       __ delayed()->inc(offset8, 16);
  2389       // Copy last 8 bytes
  2390     __ BIND(L_copy_8_bytes);
  2391       __ inccc(count, 2);
  2392       __ brx(Assembler::zero, true, Assembler::pn, L_exit );
  2393       __ delayed()->mov(offset0, offset8); // Set O5 used by other stubs
  2394       __ ldx(from, offset0, O3);
  2395       __ stx(O3, to, offset0);
  2396     __ BIND(L_exit);
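  // Illustrative C rendering of the pseudo-code comment above
  // generate_disjoint_long_copy_core(): an 8-element (64-byte) main loop, a
  // 2-element (16-byte) loop, and a final odd element.  The name is
  // illustration only; prefetching and BIS stores are omitted.
  static void disjoint_long_copy_model(const jlong* from, jlong* to, long count) {
    long i = 0;
    count -= 2;
    if (count >= 0) {                      // at least 2 elements
      if (count > 6) {                     // at least 8 elements: 64-byte chunks
        count -= 6;                        // original count - 8
        do {
          for (int k = 0; k < 8; k++) to[i + k] = from[i + k];
          i += 8;
          count -= 8;
        } while (count >= 0);
        count += 6;
      }
      while (count >= 0) {                 // 16-byte chunks
        to[i] = from[i]; to[i + 1] = from[i + 1];
        i += 2;
        count -= 2;
      }
    }
    count += 2;
    if (count != 0) {                      // one element left
      to[i] = from[i];
    }
  }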
  2399   //
  2400   //  Generate stub for disjoint long copy.
  2401   //  "aligned" is ignored, because we must make the stronger
  2402   //  assumption that both addresses are always 64-bit aligned.
  2403   //
  2404   // Arguments for generated stub:
  2405   //      from:  O0
  2406   //      to:    O1
  2407   //      count: O2 treated as signed
  2408   //
  2409   address generate_disjoint_long_copy(bool aligned, address *entry, const char *name) {
  2410     __ align(CodeEntryAlignment);
  2411     StubCodeMark mark(this, "StubRoutines", name);
  2412     address start = __ pc();
  2414     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
  2416     if (entry != NULL) {
  2417       *entry = __ pc();
  2418       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
  2419       BLOCK_COMMENT("Entry:");
  2422     generate_disjoint_long_copy_core(aligned);
  2424     // O3, O4 are used as temp registers
  2425     inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
  2426     __ retl();
  2427     __ delayed()->mov(G0, O0); // return 0
  2428     return start;
  2431   //
  2432   //  Generate core code for conjoint long copy (and oop copy on 64-bit).
  2433   //  "aligned" is ignored, because we must make the stronger
  2434   //  assumption that both addresses are always 64-bit aligned.
  2435   //
  2436   // Arguments:
  2437   //      from:  O0
  2438   //      to:    O1
  2439   //      count: O2 treated as signed
  2440   //
  2441   void generate_conjoint_long_copy_core(bool aligned) {
  2442     // Do reverse copy.
  2443     Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
  2444     const Register from    = O0;  // source array address
  2445     const Register to      = O1;  // destination array address
  2446     const Register count   = O2;  // elements count
  2447     const Register offset8 = O4;  // element offset
  2448     const Register offset0 = O5;  // previous element offset
  2450       __ subcc(count, 1, count);
  2451       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes );
  2452       __ delayed()->sllx(count, LogBytesPerLong, offset8);
  2453       __ sub(offset8, 8, offset0);
  2454       __ align(OptoLoopAlignment);
  2455     __ BIND(L_copy_16_bytes);
  2456       __ ldx(from, offset8, O2);
  2457       __ ldx(from, offset0, O3);
  2458       __ stx(O2, to, offset8);
  2459       __ deccc(offset8, 16);      // use offset8 as counter
  2460       __ stx(O3, to, offset0);
  2461       __ brx(Assembler::greater, false, Assembler::pt, L_copy_16_bytes);
  2462       __ delayed()->dec(offset0, 16);
  2464     __ BIND(L_copy_8_bytes);
  2465       __ brx(Assembler::negative, false, Assembler::pn, L_exit );
  2466       __ delayed()->nop();
  2467       __ ldx(from, 0, O3);
  2468       __ stx(O3, to, 0);
  2469     __ BIND(L_exit);
  2472   //  Generate stub for conjoint long copy.
  2473   //  "aligned" is ignored, because we must make the stronger
  2474   //  assumption that both addresses are always 64-bit aligned.
  2475   //
  2476   // Arguments for generated stub:
  2477   //      from:  O0
  2478   //      to:    O1
  2479   //      count: O2 treated as signed
  2480   //
  2481   address generate_conjoint_long_copy(bool aligned, address nooverlap_target,
  2482                                       address *entry, const char *name) {
  2483     __ align(CodeEntryAlignment);
  2484     StubCodeMark mark(this, "StubRoutines", name);
  2485     address start = __ pc();
  2487     assert(aligned, "Should always be aligned");
  2489     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
  2491     if (entry != NULL) {
  2492       *entry = __ pc();
  2493       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
  2494       BLOCK_COMMENT("Entry:");
  2497     array_overlap_test(nooverlap_target, 3);
  2499     generate_conjoint_long_copy_core(aligned);
  2501     // O3, O4 are used as temp registers
  2502     inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
  2503     __ retl();
  2504     __ delayed()->mov(G0, O0); // return 0
  2505     return start;
  2508   //  Generate stub for disjoint oop copy.  If "aligned" is true, the
  2509   //  "from" and "to" addresses are assumed to be heapword aligned.
  2510   //
  2511   // Arguments for generated stub:
  2512   //      from:  O0
  2513   //      to:    O1
  2514   //      count: O2 treated as signed
  2515   //
  2516   address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name,
  2517                                      bool dest_uninitialized = false) {
  2519     const Register from  = O0;  // source array address
  2520     const Register to    = O1;  // destination array address
  2521     const Register count = O2;  // elements count
  2523     __ align(CodeEntryAlignment);
  2524     StubCodeMark mark(this, "StubRoutines", name);
  2525     address start = __ pc();
  2527     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
  2529     if (entry != NULL) {
  2530       *entry = __ pc();
  2531       // caller can pass a 64-bit byte count here
  2532       BLOCK_COMMENT("Entry:");
  2535     // save arguments for barrier generation
  2536     __ mov(to, G1);
  2537     __ mov(count, G5);
  2538     gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
  2539   #ifdef _LP64
  2540     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
  2541     if (UseCompressedOops) {
  2542       generate_disjoint_int_copy_core(aligned);
  2543     } else {
  2544       generate_disjoint_long_copy_core(aligned);
  2546   #else
  2547     generate_disjoint_int_copy_core(aligned);
  2548   #endif
  2549     // O0 is used as temp register
  2550     gen_write_ref_array_post_barrier(G1, G5, O0);
  2552     // O3, O4 are used as temp registers
  2553     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
  2554     __ retl();
  2555     __ delayed()->mov(G0, O0); // return 0
  2556     return start;
  2559   //  Generate stub for conjoint oop copy.  If "aligned" is true, the
  2560   //  "from" and "to" addresses are assumed to be heapword aligned.
  2561   //
  2562   // Arguments for generated stub:
  2563   //      from:  O0
  2564   //      to:    O1
  2565   //      count: O2 treated as signed
  2566   //
  2567   address generate_conjoint_oop_copy(bool aligned, address nooverlap_target,
  2568                                      address *entry, const char *name,
  2569                                      bool dest_uninitialized = false) {
  2571     const Register from  = O0;  // source array address
  2572     const Register to    = O1;  // destination array address
  2573     const Register count = O2;  // elements count
  2575     __ align(CodeEntryAlignment);
  2576     StubCodeMark mark(this, "StubRoutines", name);
  2577     address start = __ pc();
  2579     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
  2581     if (entry != NULL) {
  2582       *entry = __ pc();
  2583       // caller can pass a 64-bit byte count here
  2584       BLOCK_COMMENT("Entry:");
  2587     array_overlap_test(nooverlap_target, LogBytesPerHeapOop);
  2589     // save arguments for barrier generation
  2590     __ mov(to, G1);
  2591     __ mov(count, G5);
  2592     gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
  2594   #ifdef _LP64
  2595     if (UseCompressedOops) {
  2596       generate_conjoint_int_copy_core(aligned);
  2597     } else {
  2598       generate_conjoint_long_copy_core(aligned);
  2600   #else
  2601     generate_conjoint_int_copy_core(aligned);
  2602   #endif
  2604     // O0 is used as temp register
  2605     gen_write_ref_array_post_barrier(G1, G5, O0);
  2607     // O3, O4 are used as temp registers
  2608     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
  2609     __ retl();
  2610     __ delayed()->mov(G0, O0); // return 0
  2611     return start;
  2615   // Helper for generating a dynamic type check.
  2616   // Smashes only the given temp registers.
  2617   void generate_type_check(Register sub_klass,
  2618                            Register super_check_offset,
  2619                            Register super_klass,
  2620                            Register temp,
  2621                            Label& L_success) {
  2622     assert_different_registers(sub_klass, super_check_offset, super_klass, temp);
  2624     BLOCK_COMMENT("type_check:");
  2626     Label L_miss, L_pop_to_miss;
  2628     assert_clean_int(super_check_offset, temp);
  2630     __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, noreg,
  2631                                      &L_success, &L_miss, NULL,
  2632                                      super_check_offset);
  2634     BLOCK_COMMENT("type_check_slow_path:");
  2635     __ save_frame(0);
  2636     __ check_klass_subtype_slow_path(sub_klass->after_save(),
  2637                                      super_klass->after_save(),
  2638                                      L0, L1, L2, L4,
  2639                                      NULL, &L_pop_to_miss);
  2640     __ ba(L_success);
  2641     __ delayed()->restore();
  2643     __ bind(L_pop_to_miss);
  2644     __ restore();
  2646     // Fall through on failure!
  2647     __ BIND(L_miss);
  2651   //  Generate stub for checked oop copy.
  2652   //
  2653   // Arguments for generated stub:
  2654   //      from:  O0
  2655   //      to:    O1
  2656   //      count: O2 treated as signed
  2657   //      ckoff: O3 (super_check_offset)
  2658   //      ckval: O4 (super_klass)
  2659   //      ret:   O0 zero for success; (-1^K) where K is partial transfer count
  2660   //
  2661   address generate_checkcast_copy(const char *name, address *entry, bool dest_uninitialized = false) {
  2663     const Register O0_from   = O0;      // source array address
  2664     const Register O1_to     = O1;      // destination array address
  2665     const Register O2_count  = O2;      // elements count
  2666     const Register O3_ckoff  = O3;      // super_check_offset
  2667     const Register O4_ckval  = O4;      // super_klass
  2669     const Register O5_offset = O5;      // loop var, with stride wordSize
  2670     const Register G1_remain = G1;      // loop var, with stride -1
  2671     const Register G3_oop    = G3;      // actual oop copied
  2672     const Register G4_klass  = G4;      // oop._klass
  2673     const Register G5_super  = G5;      // oop._klass._primary_supers[ckval]
  2675     __ align(CodeEntryAlignment);
  2676     StubCodeMark mark(this, "StubRoutines", name);
  2677     address start = __ pc();
  2679 #ifdef ASSERT
  2680     // We sometimes save a frame (see generate_type_check above).
  2681     // If this will cause trouble, let's fail now instead of later.
  2682     __ save_frame(0);
  2683     __ restore();
  2684 #endif
  2686     assert_clean_int(O2_count, G1);     // Make sure 'count' is clean int.
  2688 #ifdef ASSERT
  2689     // caller guarantees that the arrays really are different
  2690     // otherwise, we would have to make conjoint checks
  2691     { Label L;
  2692       __ mov(O3, G1);           // spill: overlap test smashes O3
  2693       __ mov(O4, G4);           // spill: overlap test smashes O4
  2694       array_overlap_test(L, LogBytesPerHeapOop);
  2695       __ stop("checkcast_copy within a single array");
  2696       __ bind(L);
  2697       __ mov(G1, O3);
  2698       __ mov(G4, O4);
  2700 #endif //ASSERT
  2702     if (entry != NULL) {
  2703       *entry = __ pc();
  2704       // caller can pass a 64-bit byte count here (from generic stub)
  2705       BLOCK_COMMENT("Entry:");
  2707     gen_write_ref_array_pre_barrier(O1_to, O2_count, dest_uninitialized);
  2709     Label load_element, store_element, do_card_marks, fail, done;
  2710     __ addcc(O2_count, 0, G1_remain);   // initialize loop index, and test it
  2711     __ brx(Assembler::notZero, false, Assembler::pt, load_element);
  2712     __ delayed()->mov(G0, O5_offset);   // offset from start of arrays
  2714     // Empty array:  Nothing to do.
  2715     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
  2716     __ retl();
  2717     __ delayed()->set(0, O0);           // return 0 on (trivial) success
  2719     // ======== begin loop ========
  2720     // (Loop is rotated; its entry is load_element.)
  2721     // Loop variables:
  2722     //   (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays
  2723     //   (O2 = len; O2 != 0; O2--) --- number of oops *remaining*
  2724     //   G3, G4, G5 --- current oop, oop.klass, oop.klass.super
  2725     __ align(OptoLoopAlignment);
  2727     __ BIND(store_element);
  2728     __ deccc(G1_remain);                // decrement the count
  2729     __ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop
  2730     __ inc(O5_offset, heapOopSize);     // step to next offset
  2731     __ brx(Assembler::zero, true, Assembler::pt, do_card_marks);
  2732     __ delayed()->set(0, O0);           // return 0 on success
  2734     // ======== loop entry is here ========
  2735     __ BIND(load_element);
  2736     __ load_heap_oop(O0_from, O5_offset, G3_oop);  // load the oop
  2737     __ br_null_short(G3_oop, Assembler::pt, store_element);
  2739     __ load_klass(G3_oop, G4_klass); // query the object klass
  2741     generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super,
  2742                         // branch to this on success:
  2743                         store_element);
  2744     // ======== end loop ========
  2746     // It was a real error; we must depend on the caller to finish the job.
  2747     // Register G1 has number of *remaining* oops, O2 number of *total* oops.
  2748     // Emit GC store barriers for the oops we have copied (O2 minus G1),
  2749     // and report their number to the caller.
  2750     __ BIND(fail);
  2751     __ subcc(O2_count, G1_remain, O2_count);
  2752     __ brx(Assembler::zero, false, Assembler::pt, done);
  2753     __ delayed()->not1(O2_count, O0);   // report (-1^K) to caller
  2755     __ BIND(do_card_marks);
  2756     gen_write_ref_array_post_barrier(O1_to, O2_count, O3);   // store check on O1[0..O2]
  2758     __ BIND(done);
  2759     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
  2760     __ retl();
  2761     __ delayed()->nop();             // return value in O0
  2763     return start;
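  // Illustrative C-level sketch of the checkcast copy loop above, with the GC
  // pre/post barriers elided and the subtype test abstracted into a caller
  // supplied predicate (the real stub uses generate_type_check()).  The return
  // convention matches the stub: 0 on full success, (-1 ^ K) after K elements
  // were transferred.  Names are illustration only.
  static int checkcast_copy_model(oop* from, oop* to, int count,
                                  bool (*element_is_assignable)(oop)) {
    int copied = 0;
    for (; copied < count; copied++) {
      oop element = from[copied];
      if (element != NULL && !element_is_assignable(element)) {
        return ~copied;            // (-1 ^ K): K elements were already copied
      }
      to[copied] = element;        // NULLs are stored without a type check
    }
    return 0;                      // every element was copied
  }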
  2767   //  Generate 'unsafe' array copy stub
  2768   //  Though just as safe as the other stubs, it takes an unscaled
  2769   //  size_t argument instead of an element count.
  2770   //
  2771   // Arguments for generated stub:
  2772   //      from:  O0
  2773   //      to:    O1
  2774   //      count: O2 byte count, treated as ssize_t, can be zero
  2775   //
  2776   // Examines the alignment of the operands and dispatches
  2777   // to a long, int, short, or byte copy loop.
  2778   //
  2779   address generate_unsafe_copy(const char* name,
  2780                                address byte_copy_entry,
  2781                                address short_copy_entry,
  2782                                address int_copy_entry,
  2783                                address long_copy_entry) {
  2785     const Register O0_from   = O0;      // source array address
  2786     const Register O1_to     = O1;      // destination array address
  2787     const Register O2_count  = O2;      // elements count
  2789     const Register G1_bits   = G1;      // test copy of low bits
  2791     __ align(CodeEntryAlignment);
  2792     StubCodeMark mark(this, "StubRoutines", name);
  2793     address start = __ pc();
  2795     // bump this on entry, not on exit:
  2796     inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, G1, G3);
  2798     __ or3(O0_from, O1_to, G1_bits);
  2799     __ or3(O2_count,       G1_bits, G1_bits);
  2801     __ btst(BytesPerLong-1, G1_bits);
  2802     __ br(Assembler::zero, true, Assembler::pt,
  2803           long_copy_entry, relocInfo::runtime_call_type);
  2804     // scale the count on the way out:
  2805     __ delayed()->srax(O2_count, LogBytesPerLong, O2_count);
  2807     __ btst(BytesPerInt-1, G1_bits);
  2808     __ br(Assembler::zero, true, Assembler::pt,
  2809           int_copy_entry, relocInfo::runtime_call_type);
  2810     // scale the count on the way out:
  2811     __ delayed()->srax(O2_count, LogBytesPerInt, O2_count);
  2813     __ btst(BytesPerShort-1, G1_bits);
  2814     __ br(Assembler::zero, true, Assembler::pt,
  2815           short_copy_entry, relocInfo::runtime_call_type);
  2816     // scale the count on the way out:
  2817     __ delayed()->srax(O2_count, LogBytesPerShort, O2_count);
  2819     __ br(Assembler::always, false, Assembler::pt,
  2820           byte_copy_entry, relocInfo::runtime_call_type);
  2821     __ delayed()->nop();
  2823     return start;
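  // Illustrative C-level sketch of the dispatch above: the widest element size
  // whose alignment is satisfied by 'from', 'to' and the byte count together
  // is selected, and the byte count is scaled down by the returned log2 element
  // size before the corresponding copy stub is entered.  The name is
  // illustration only.
  static int unsafe_copy_dispatch_model(void* from, void* to, size_t byte_count) {
    uintptr_t bits = (uintptr_t)from | (uintptr_t)to | (uintptr_t)byte_count;
    if ((bits & (BytesPerLong  - 1)) == 0) return LogBytesPerLong;   // use the jlong copy stub
    if ((bits & (BytesPerInt   - 1)) == 0) return LogBytesPerInt;    // use the jint copy stub
    if ((bits & (BytesPerShort - 1)) == 0) return LogBytesPerShort;  // use the jshort copy stub
    return 0;                                                        // fall back to the jbyte copy stub
  }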
  2827   // Perform range checks on the proposed arraycopy.
  2828   // Kills the two temps, but nothing else.
  2829   // Also, clean the sign bits of src_pos and dst_pos.
  2830   void arraycopy_range_checks(Register src,     // source array oop (O0)
  2831                               Register src_pos, // source position (O1)
  2832                               Register dst,     // destination array oop (O2)
  2833                               Register dst_pos, // destination position (O3)
  2834                               Register length,  // length of copy (O4)
  2835                               Register temp1, Register temp2,
  2836                               Label& L_failed) {
  2837     BLOCK_COMMENT("arraycopy_range_checks:");
  2839     //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
  2841     const Register array_length = temp1;  // scratch
  2842     const Register end_pos      = temp2;  // scratch
  2844     // Note:  This next instruction may be in the delay slot of a branch:
  2845     __ add(length, src_pos, end_pos);  // src_pos + length
  2846     __ lduw(src, arrayOopDesc::length_offset_in_bytes(), array_length);
  2847     __ cmp(end_pos, array_length);
  2848     __ br(Assembler::greater, false, Assembler::pn, L_failed);
  2850     //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
  2851     __ delayed()->add(length, dst_pos, end_pos); // dst_pos + length
  2852     __ lduw(dst, arrayOopDesc::length_offset_in_bytes(), array_length);
  2853     __ cmp(end_pos, array_length);
  2854     __ br(Assembler::greater, false, Assembler::pn, L_failed);
  2856     // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
  2857     // Move with sign extension can be used since they are positive.
  2858     __ delayed()->signx(src_pos, src_pos);
  2859     __ signx(dst_pos, dst_pos);
  2861     BLOCK_COMMENT("arraycopy_range_checks done");
  2865   //
  2866   //  Generate generic array copy stubs
  2867   //
  2868   //  Input:
  2869   //    O0    -  src oop
  2870   //    O1    -  src_pos
  2871   //    O2    -  dst oop
  2872   //    O3    -  dst_pos
  2873   //    O4    -  element count
  2874   //
  2875   //  Output:
  2876   //    O0 ==  0  -  success
  2877   //    O0 == -1  -  need to call System.arraycopy
  2878   //
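  //  Illustrative caller-side sketch (an assumption about how the returned
  //  value is used, not generated code):
  //
  //    int rc = generic_arraycopy(src, src_pos, dst, dst_pos, length);
  //    if (rc != 0) {
  //      // fall back to the checked slow path (e.g. System.arraycopy)
  //    }
  //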
  2879   address generate_generic_copy(const char *name,
  2880                                 address entry_jbyte_arraycopy,
  2881                                 address entry_jshort_arraycopy,
  2882                                 address entry_jint_arraycopy,
  2883                                 address entry_oop_arraycopy,
  2884                                 address entry_jlong_arraycopy,
  2885                                 address entry_checkcast_arraycopy) {
  2886     Label L_failed, L_objArray;
  2888     // Input registers
  2889     const Register src      = O0;  // source array oop
  2890     const Register src_pos  = O1;  // source position
  2891     const Register dst      = O2;  // destination array oop
  2892     const Register dst_pos  = O3;  // destination position
  2893     const Register length   = O4;  // elements count
  2895     // registers used as temp
  2896     const Register G3_src_klass = G3; // source array klass
  2897     const Register G4_dst_klass = G4; // destination array klass
  2898     const Register G5_lh        = G5; // layout helper
  2899     const Register O5_temp      = O5;
  2901     __ align(CodeEntryAlignment);
  2902     StubCodeMark mark(this, "StubRoutines", name);
  2903     address start = __ pc();
  2905     // bump this on entry, not on exit:
  2906     inc_counter_np(SharedRuntime::_generic_array_copy_ctr, G1, G3);
  2908     // In principle, the int arguments could be dirty.
  2909     //assert_clean_int(src_pos, G1);
  2910     //assert_clean_int(dst_pos, G1);
  2911     //assert_clean_int(length, G1);
  2913     //-----------------------------------------------------------------------
  2914     // Assembler stubs will be used for this call to arraycopy
  2915     // if the following conditions are met:
  2916     //
  2917     // (1) src and dst must not be null.
  2918     // (2) src_pos must not be negative.
  2919     // (3) dst_pos must not be negative.
  2920     // (4) length  must not be negative.
  2921     // (5) src klass and dst klass should be the same and not NULL.
  2922     // (6) src and dst should be arrays.
  2923     // (7) src_pos + length must not exceed length of src.
  2924     // (8) dst_pos + length must not exceed length of dst.
  2925     BLOCK_COMMENT("arraycopy initial argument checks");
  2927     //  if (src == NULL) return -1;
  2928     __ br_null(src, false, Assembler::pn, L_failed);
  2930     //  if (src_pos < 0) return -1;
  2931     __ delayed()->tst(src_pos);
  2932     __ br(Assembler::negative, false, Assembler::pn, L_failed);
  2933     __ delayed()->nop();
  2935     //  if (dst == NULL) return -1;
  2936     __ br_null(dst, false, Assembler::pn, L_failed);
  2938     //  if (dst_pos < 0) return -1;
  2939     __ delayed()->tst(dst_pos);
  2940     __ br(Assembler::negative, false, Assembler::pn, L_failed);
  2942     //  if (length < 0) return -1;
  2943     __ delayed()->tst(length);
  2944     __ br(Assembler::negative, false, Assembler::pn, L_failed);
  2946     BLOCK_COMMENT("arraycopy argument klass checks");
  2947     //  get src->klass()
  2948     if (UseCompressedClassPointers) {
  2949       __ delayed()->nop(); // ??? not good
  2950       __ load_klass(src, G3_src_klass);
  2951     } else {
  2952       __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass);
  2955 #ifdef ASSERT
  2956     //  assert(src->klass() != NULL);
  2957     BLOCK_COMMENT("assert klasses not null");
  2958     { Label L_a, L_b;
  2959       __ br_notnull_short(G3_src_klass, Assembler::pt, L_b); // it is broken if klass is NULL
  2960       __ bind(L_a);
  2961       __ stop("broken null klass");
  2962       __ bind(L_b);
  2963       __ load_klass(dst, G4_dst_klass);
  2964       __ br_null(G4_dst_klass, false, Assembler::pn, L_a); // this would be broken also
  2965       __ delayed()->mov(G0, G4_dst_klass);      // scribble the temp
  2966       BLOCK_COMMENT("assert done");
  2968 #endif
  2970     // Load layout helper
  2971     //
  2972     //  |array_tag|     | header_size | element_type |     |log2_element_size|
  2973     // 32        30    24            16              8     2                 0
  2974     //
  2975     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
  2976     //
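    //
    //   Illustrative decode of the layout helper word (sketch only; the
    //   Klass::_lh_* constants are the same ones used by the code below):
    //
    //     int  tag         = (juint)lh >> Klass::_lh_array_tag_shift;   // 0x3 typeArray, 0x2 objArray
    //     int  header_size = (lh >> Klass::_lh_header_size_shift) & Klass::_lh_header_size_mask;
    //     int  log2_elsize = lh & Klass::_lh_log2_element_size_mask;
    //     bool is_array    = (lh < Klass::_lh_neutral_value);           // i.e. lh < 0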
  2978     int lh_offset = in_bytes(Klass::layout_helper_offset());
  2980     // Load 32-bit signed value. Use br() instruction with it to check icc.
  2981     __ lduw(G3_src_klass, lh_offset, G5_lh);
  2983     if (UseCompressedClassPointers) {
  2984       __ load_klass(dst, G4_dst_klass);
  2986     // Handle objArrays completely differently...
  2987     juint objArray_lh = Klass::array_layout_helper(T_OBJECT);
  2988     __ set(objArray_lh, O5_temp);
  2989     __ cmp(G5_lh,       O5_temp);
  2990     __ br(Assembler::equal, false, Assembler::pt, L_objArray);
  2991     if (UseCompressedClassPointers) {
  2992       __ delayed()->nop();
  2993     } else {
  2994       __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
  2997     //  if (src->klass() != dst->klass()) return -1;
  2998     __ cmp_and_brx_short(G3_src_klass, G4_dst_klass, Assembler::notEqual, Assembler::pn, L_failed);
  3000     //  if (!src->is_Array()) return -1;
  3001     __ cmp(G5_lh, Klass::_lh_neutral_value); // < 0
  3002     __ br(Assembler::greaterEqual, false, Assembler::pn, L_failed);
  3004     // At this point, it is known to be a typeArray (array_tag 0x3).
  3005 #ifdef ASSERT
  3006     __ delayed()->nop();
  3007     { Label L;
  3008       jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
  3009       __ set(lh_prim_tag_in_place, O5_temp);
  3010       __ cmp(G5_lh,                O5_temp);
  3011       __ br(Assembler::greaterEqual, false, Assembler::pt, L);
  3012       __ delayed()->nop();
  3013       __ stop("must be a primitive array");
  3014       __ bind(L);
  3016 #else
  3017     __ delayed();                               // match next insn to prev branch
  3018 #endif
  3020     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
  3021                            O5_temp, G4_dst_klass, L_failed);
  3023     // TypeArrayKlass
  3024     //
  3025     // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
  3026     // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
  3027     //
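    // Illustrative example only: for a jint array (log2elemsize == 2) with a
    // header size of H bytes decoded from the layout helper,
    //
    //   src_addr = src + H + ((intptr_t)src_pos << 2);   // element src_pos of src
    //   dst_addr = dst + H + ((intptr_t)dst_pos << 2);   // element dst_pos of dst
    //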
  3029     const Register G4_offset = G4_dst_klass;    // array offset
  3030     const Register G3_elsize = G3_src_klass;    // log2 element size
  3032     __ srl(G5_lh, Klass::_lh_header_size_shift, G4_offset);
  3033     __ and3(G4_offset, Klass::_lh_header_size_mask, G4_offset); // array_offset
  3034     __ add(src, G4_offset, src);       // src array offset
  3035     __ add(dst, G4_offset, dst);       // dst array offset
  3036     __ and3(G5_lh, Klass::_lh_log2_element_size_mask, G3_elsize); // log2 element size
  3038     // next registers should be set before the jump to corresponding stub
  3039     const Register from     = O0;  // source array address
  3040     const Register to       = O1;  // destination array address
  3041     const Register count    = O2;  // elements count
  3043     // 'from', 'to', 'count' registers should be set in this order
  3044     // since they are the same as 'src', 'src_pos', 'dst'.
  3046     BLOCK_COMMENT("scale indexes to element size");
  3047     __ sll_ptr(src_pos, G3_elsize, src_pos);
  3048     __ sll_ptr(dst_pos, G3_elsize, dst_pos);
  3049     __ add(src, src_pos, from);       // src_addr
  3050     __ add(dst, dst_pos, to);         // dst_addr
  3052     BLOCK_COMMENT("choose copy loop based on element size");
  3053     __ cmp(G3_elsize, 0);
  3054     __ br(Assembler::equal, true, Assembler::pt, entry_jbyte_arraycopy);
  3055     __ delayed()->signx(length, count); // length
  3057     __ cmp(G3_elsize, LogBytesPerShort);
  3058     __ br(Assembler::equal, true, Assembler::pt, entry_jshort_arraycopy);
  3059     __ delayed()->signx(length, count); // length
  3061     __ cmp(G3_elsize, LogBytesPerInt);
  3062     __ br(Assembler::equal, true, Assembler::pt, entry_jint_arraycopy);
  3063     __ delayed()->signx(length, count); // length
  3064 #ifdef ASSERT
  3065     { Label L;
  3066       __ cmp_and_br_short(G3_elsize, LogBytesPerLong, Assembler::equal, Assembler::pt, L);
  3067       __ stop("must be long copy, but elsize is wrong");
  3068       __ bind(L);
  3070 #endif
  3071     __ br(Assembler::always, false, Assembler::pt, entry_jlong_arraycopy);
  3072     __ delayed()->signx(length, count); // length
  3074     // ObjArrayKlass
  3075   __ BIND(L_objArray);
  3076     // live at this point:  G3_src_klass, G4_dst_klass, src[_pos], dst[_pos], length
  3078     Label L_plain_copy, L_checkcast_copy;
  3079     //  test array classes for subtyping
  3080     __ cmp(G3_src_klass, G4_dst_klass);         // usual case is exact equality
  3081     __ brx(Assembler::notEqual, true, Assembler::pn, L_checkcast_copy);
  3082     __ delayed()->lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted from below
  3084     // Identically typed arrays can be copied without element-wise checks.
  3085     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
  3086                            O5_temp, G5_lh, L_failed);
  3088     __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
  3089     __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
  3090     __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
  3091     __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
  3092     __ add(src, src_pos, from);       // src_addr
  3093     __ add(dst, dst_pos, to);         // dst_addr
  3094   __ BIND(L_plain_copy);
  3095     __ br(Assembler::always, false, Assembler::pt, entry_oop_arraycopy);
  3096     __ delayed()->signx(length, count); // length
  3098   __ BIND(L_checkcast_copy);
  3099     // live at this point:  G3_src_klass, G4_dst_klass
  3101       // Before looking at dst.length, make sure dst is also an objArray.
  3102       // lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted to delay slot
  3103       __ cmp(G5_lh,                    O5_temp);
  3104       __ br(Assembler::notEqual, false, Assembler::pn, L_failed);
  3106       // It is safe to examine both src.length and dst.length.
  3107       __ delayed();                             // match next insn to prev branch
  3108       arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
  3109                              O5_temp, G5_lh, L_failed);
  3111       // Marshal the base address arguments now, freeing registers.
  3112       __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
  3113       __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
  3114       __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
  3115       __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
  3116       __ add(src, src_pos, from);               // src_addr
  3117       __ add(dst, dst_pos, to);                 // dst_addr
  3118       __ signx(length, count);                  // length (reloaded)
  3120       Register sco_temp = O3;                   // this register is free now
  3121       assert_different_registers(from, to, count, sco_temp,
  3122                                  G4_dst_klass, G3_src_klass);
  3124       // Generate the type check.
  3125       int sco_offset = in_bytes(Klass::super_check_offset_offset());
  3126       __ lduw(G4_dst_klass, sco_offset, sco_temp);
  3127       generate_type_check(G3_src_klass, sco_temp, G4_dst_klass,
  3128                           O5_temp, L_plain_copy);
  3130       // Fetch destination element klass from the ObjArrayKlass header.
  3131       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
  3133       // the checkcast_copy loop needs two extra arguments:
  3134       __ ld_ptr(G4_dst_klass, ek_offset, O4);   // dest elem klass
  3135       // lduw(O4, sco_offset, O3);              // sco of elem klass
  3137       __ br(Assembler::always, false, Assembler::pt, entry_checkcast_arraycopy);
  3138       __ delayed()->lduw(O4, sco_offset, O3);
  3141   __ BIND(L_failed);
  3142     __ retl();
  3143     __ delayed()->sub(G0, 1, O0); // return -1
  3144     return start;
  3147   //
  3148   //  Generate stub for heap zeroing.
  3149   //  "to" address is aligned to jlong (8 bytes).
  3150   //
  3151   // Arguments for generated stub:
  3152   //      to:    O0
  3153   //      count: O1 treated as signed (count of HeapWord)
  3154   //             count could be 0
  3155   //
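  // Illustrative semantics of the generated stub (sketch only):
  //
  //   void zero_aligned_words(HeapWord* to, intptr_t count) {
  //     memset(to, 0, count * HeapWordSize);   // 'to' is 8-byte aligned; count may be 0
  //   }
  //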
  3156   address generate_zero_aligned_words(const char* name) {
  3157     __ align(CodeEntryAlignment);
  3158     StubCodeMark mark(this, "StubRoutines", name);
  3159     address start = __ pc();
  3161     const Register to    = O0;   // destination address (to be zeroed)
  3162     const Register count = O1;   // HeapWords count
  3163     const Register temp  = O2;   // scratch
  3165     Label Ldone;
  3166     __ sllx(count, LogHeapWordSize, count); // to bytes count
  3167     // Use BIS for zeroing
  3168     __ bis_zeroing(to, count, temp, Ldone);
  3169     __ bind(Ldone);
  3170     __ retl();
  3171     __ delayed()->nop();
  3172     return start;
  3175   void generate_arraycopy_stubs() {
  3176     address entry;
  3177     address entry_jbyte_arraycopy;
  3178     address entry_jshort_arraycopy;
  3179     address entry_jint_arraycopy;
  3180     address entry_oop_arraycopy;
  3181     address entry_jlong_arraycopy;
  3182     address entry_checkcast_arraycopy;
  3184     //*** jbyte
  3185     // Always need aligned and unaligned versions
  3186     StubRoutines::_jbyte_disjoint_arraycopy         = generate_disjoint_byte_copy(false, &entry,
  3187                                                                                   "jbyte_disjoint_arraycopy");
  3188     StubRoutines::_jbyte_arraycopy                  = generate_conjoint_byte_copy(false, entry,
  3189                                                                                   &entry_jbyte_arraycopy,
  3190                                                                                   "jbyte_arraycopy");
  3191     StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry,
  3192                                                                                   "arrayof_jbyte_disjoint_arraycopy");
  3193     StubRoutines::_arrayof_jbyte_arraycopy          = generate_conjoint_byte_copy(true, entry, NULL,
  3194                                                                                   "arrayof_jbyte_arraycopy");
  3196     //*** jshort
  3197     // Always need aligned and unaligned versions
  3198     StubRoutines::_jshort_disjoint_arraycopy         = generate_disjoint_short_copy(false, &entry,
  3199                                                                                     "jshort_disjoint_arraycopy");
  3200     StubRoutines::_jshort_arraycopy                  = generate_conjoint_short_copy(false, entry,
  3201                                                                                     &entry_jshort_arraycopy,
  3202                                                                                     "jshort_arraycopy");
  3203     StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry,
  3204                                                                                     "arrayof_jshort_disjoint_arraycopy");
  3205     StubRoutines::_arrayof_jshort_arraycopy          = generate_conjoint_short_copy(true, entry, NULL,
  3206                                                                                     "arrayof_jshort_arraycopy");
  3208     //*** jint
  3209     // Aligned versions
  3210     StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry,
  3211                                                                                 "arrayof_jint_disjoint_arraycopy");
  3212     StubRoutines::_arrayof_jint_arraycopy          = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy,
  3213                                                                                 "arrayof_jint_arraycopy");
  3214 #ifdef _LP64
  3215     // In 64-bit mode we need both aligned and unaligned versions of jint arraycopy.
  3216     // entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it).
  3217     StubRoutines::_jint_disjoint_arraycopy         = generate_disjoint_int_copy(false, &entry,
  3218                                                                                 "jint_disjoint_arraycopy");
  3219     StubRoutines::_jint_arraycopy                  = generate_conjoint_int_copy(false, entry,
  3220                                                                                 &entry_jint_arraycopy,
  3221                                                                                 "jint_arraycopy");
  3222 #else
  3223     // In 32-bit mode jints are always HeapWordSize aligned, so always use the aligned version
  3224     // (in fact in 32bit we always have a pre-loop part even in the aligned version,
  3225     //  because it uses 64-bit loads/stores, so the aligned flag is actually ignored).
  3226     StubRoutines::_jint_disjoint_arraycopy = StubRoutines::_arrayof_jint_disjoint_arraycopy;
  3227     StubRoutines::_jint_arraycopy          = StubRoutines::_arrayof_jint_arraycopy;
  3228 #endif
  3231     //*** jlong
  3232     // It is always aligned
  3233     StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, &entry,
  3234                                                                                   "arrayof_jlong_disjoint_arraycopy");
  3235     StubRoutines::_arrayof_jlong_arraycopy          = generate_conjoint_long_copy(true, entry, &entry_jlong_arraycopy,
  3236                                                                                   "arrayof_jlong_arraycopy");
  3237     StubRoutines::_jlong_disjoint_arraycopy         = StubRoutines::_arrayof_jlong_disjoint_arraycopy;
  3238     StubRoutines::_jlong_arraycopy                  = StubRoutines::_arrayof_jlong_arraycopy;
  3241     //*** oops
  3242     // Aligned versions
  3243     StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, &entry,
  3244                                                                                       "arrayof_oop_disjoint_arraycopy");
  3245     StubRoutines::_arrayof_oop_arraycopy                 = generate_conjoint_oop_copy(true, entry, &entry_oop_arraycopy,
  3246                                                                                       "arrayof_oop_arraycopy");
  3247     // Aligned versions without pre-barriers
  3248     StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, &entry,
  3249                                                                                       "arrayof_oop_disjoint_arraycopy_uninit",
  3250                                                                                       /*dest_uninitialized*/true);
  3251     StubRoutines::_arrayof_oop_arraycopy_uninit          = generate_conjoint_oop_copy(true, entry, NULL,
  3252                                                                                       "arrayof_oop_arraycopy_uninit",
  3253                                                                                       /*dest_uninitialized*/true);
  3254 #ifdef _LP64
  3255     if (UseCompressedOops) {
  3256       // With compressed oops we need unaligned versions, notice that we overwrite entry_oop_arraycopy.
  3257       StubRoutines::_oop_disjoint_arraycopy            = generate_disjoint_oop_copy(false, &entry,
  3258                                                                                     "oop_disjoint_arraycopy");
  3259       StubRoutines::_oop_arraycopy                     = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy,
  3260                                                                                     "oop_arraycopy");
  3261       // Unaligned versions without pre-barriers
  3262       StubRoutines::_oop_disjoint_arraycopy_uninit     = generate_disjoint_oop_copy(false, &entry,
  3263                                                                                     "oop_disjoint_arraycopy_uninit",
  3264                                                                                     /*dest_uninitialized*/true);
  3265       StubRoutines::_oop_arraycopy_uninit              = generate_conjoint_oop_copy(false, entry, NULL,
  3266                                                                                     "oop_arraycopy_uninit",
  3267                                                                                     /*dest_uninitialized*/true);
  3268     } else
  3269 #endif
  3271       // oop arraycopy is always aligned on 32-bit, and on 64-bit without compressed oops
  3272       StubRoutines::_oop_disjoint_arraycopy            = StubRoutines::_arrayof_oop_disjoint_arraycopy;
  3273       StubRoutines::_oop_arraycopy                     = StubRoutines::_arrayof_oop_arraycopy;
  3274       StubRoutines::_oop_disjoint_arraycopy_uninit     = StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit;
  3275       StubRoutines::_oop_arraycopy_uninit              = StubRoutines::_arrayof_oop_arraycopy_uninit;
  3278     StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
  3279     StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
  3280                                                                         /*dest_uninitialized*/true);
  3282     StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy",
  3283                                                               entry_jbyte_arraycopy,
  3284                                                               entry_jshort_arraycopy,
  3285                                                               entry_jint_arraycopy,
  3286                                                               entry_jlong_arraycopy);
  3287     StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy",
  3288                                                                entry_jbyte_arraycopy,
  3289                                                                entry_jshort_arraycopy,
  3290                                                                entry_jint_arraycopy,
  3291                                                                entry_oop_arraycopy,
  3292                                                                entry_jlong_arraycopy,
  3293                                                                entry_checkcast_arraycopy);
  3295     StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
  3296     StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
  3297     StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
  3298     StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
  3299     StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
  3300     StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
  3302     if (UseBlockZeroing) {
  3303       StubRoutines::_zero_aligned_words = generate_zero_aligned_words("zero_aligned_words");
  3307   address generate_aescrypt_encryptBlock() {
  3308     // required since we read expanded key 'int' array starting first element without alignment considerations
  3309     assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0,
  3310            "the following code assumes that first element of an int array is aligned to 8 bytes");
  3311     __ align(CodeEntryAlignment);
  3312     StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
  3313     Label L_load_misaligned_input, L_load_expanded_key, L_doLast128bit, L_storeOutput, L_store_misaligned_output;
  3314     address start = __ pc();
  3315     Register from = O0; // source byte array
  3316     Register to = O1;   // destination byte array
  3317     Register key = O2;  // expanded key array
  3318     const Register keylen = O4; // reg for storing expanded key array length
  3320     // read expanded key length
  3321     __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
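    // Note (standard AES key-schedule sizing, relied on by the keylen checks below):
    // the expanded key holds 4*(rounds+1) ints, so
    //   44 ints -> 10 rounds -> 128-bit key
    //   52 ints -> 12 rounds -> 192-bit key
    //   60 ints -> 14 rounds -> 256-bit key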
  3323     // Method to address arbitrary alignment for load instructions:
  3324     // Check last 3 bits of 'from' address to see if it is aligned to 8-byte boundary
  3325     // If zero/aligned then continue with double FP load instructions
  3326     // If not zero/mis-aligned then alignaddr will set GSR.align with number of bytes to skip during faligndata
  3327     // alignaddr will also convert arbitrary aligned 'from' address to nearest 8-byte aligned address
  3328     // load 3 * 8-byte components (to read 16 bytes input) in 3 different FP regs starting at this aligned address
  3329     // faligndata will then extract (based on GSR.align value) the appropriate 8 bytes from the 2 source regs
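    //
    // C-like restatement of the steps above (illustrative only; GSR.align is the
    // hidden state written by alignaddr and consumed by faligndata):
    //
    //   if ((from & 7) == 0) {                    // already 8-byte aligned
    //     lo = *(uint64_t*)(from);  hi = *(uint64_t*)(from + 8);
    //   } else {
    //     n = from & 7;  aligned = from & ~7;     // alignaddr: GSR.align = n
    //     a = *(uint64_t*)(aligned);
    //     b = *(uint64_t*)(aligned + 8);
    //     c = *(uint64_t*)(aligned + 16);
    //     lo = faligndata(a, b);                  // original bytes 0..7
    //     hi = faligndata(b, c);                  // original bytes 8..15
    //   }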
  3331     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
  3332     __ andcc(from, 7, G0);
  3333     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input);
  3334     __ delayed()->alignaddr(from, G0, from);
  3336     // aligned case: load input into F54-F56
  3337     __ ldf(FloatRegisterImpl::D, from, 0, F54);
  3338     __ ldf(FloatRegisterImpl::D, from, 8, F56);
  3339     __ ba_short(L_load_expanded_key);
  3341     __ BIND(L_load_misaligned_input);
  3342     __ ldf(FloatRegisterImpl::D, from, 0, F54);
  3343     __ ldf(FloatRegisterImpl::D, from, 8, F56);
  3344     __ ldf(FloatRegisterImpl::D, from, 16, F58);
  3345     __ faligndata(F54, F56, F54);
  3346     __ faligndata(F56, F58, F56);
  3348     __ BIND(L_load_expanded_key);
  3349     // Since we load expanded key buffers starting first element, 8-byte alignment is guaranteed
  3350     for ( int i = 0;  i <= 38; i += 2 ) {
  3351       __ ldf(FloatRegisterImpl::D, key, i*4, as_FloatRegister(i));
  3354     // perform cipher transformation
  3355     __ fxor(FloatRegisterImpl::D, F0, F54, F54);
  3356     __ fxor(FloatRegisterImpl::D, F2, F56, F56);
  3357     // rounds 1 through 8
  3358     for ( int i = 4;  i <= 28; i += 8 ) {
  3359       __ aes_eround01(as_FloatRegister(i), F54, F56, F58);
  3360       __ aes_eround23(as_FloatRegister(i+2), F54, F56, F60);
  3361       __ aes_eround01(as_FloatRegister(i+4), F58, F60, F54);
  3362       __ aes_eround23(as_FloatRegister(i+6), F58, F60, F56);
  3364     __ aes_eround01(F36, F54, F56, F58); //round 9
  3365     __ aes_eround23(F38, F54, F56, F60);
  3367     // 128-bit original key size
  3368     __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pt, L_doLast128bit);
  3370     for ( int i = 40;  i <= 50; i += 2 ) {
  3371       __ ldf(FloatRegisterImpl::D, key, i*4, as_FloatRegister(i) );
  3373     __ aes_eround01(F40, F58, F60, F54); //round 10
  3374     __ aes_eround23(F42, F58, F60, F56);
  3375     __ aes_eround01(F44, F54, F56, F58); //round 11
  3376     __ aes_eround23(F46, F54, F56, F60);
  3378     // 192-bit original key size
  3379     __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pt, L_storeOutput);
  3381     __ ldf(FloatRegisterImpl::D, key, 208, F52);
  3382     __ aes_eround01(F48, F58, F60, F54); //round 12
  3383     __ aes_eround23(F50, F58, F60, F56);
  3384     __ ldf(FloatRegisterImpl::D, key, 216, F46);
  3385     __ ldf(FloatRegisterImpl::D, key, 224, F48);
  3386     __ ldf(FloatRegisterImpl::D, key, 232, F50);
  3387     __ aes_eround01(F52, F54, F56, F58); //round 13
  3388     __ aes_eround23(F46, F54, F56, F60);
  3389     __ ba_short(L_storeOutput);
  3391     __ BIND(L_doLast128bit);
  3392     __ ldf(FloatRegisterImpl::D, key, 160, F48);
  3393     __ ldf(FloatRegisterImpl::D, key, 168, F50);
  3395     __ BIND(L_storeOutput);
  3396     // perform last round of encryption common for all key sizes
  3397     __ aes_eround01_l(F48, F58, F60, F54); //last round
  3398     __ aes_eround23_l(F50, F58, F60, F56);
  3400     // Method to address arbitrary alignment for store instructions:
  3401     // Check last 3 bits of 'dest' address to see if it is aligned to 8-byte boundary
  3402     // If zero/aligned then continue with double FP store instructions
  3403     // If not zero/mis-aligned then edge8n will generate edge mask in result reg (O3 in below case)
  3404     // Example: If dest address is 0x07 and nearest 8-byte aligned address is 0x00 then edge mask will be 00000001
  3405     // Compute (8-n) where n is # of bytes skipped by partial store(stpartialf) inst from edge mask, n=7 in this case
  3406     // We get the value of n from the andcc that checks 'dest' alignment. n is available in O5 in below case.
  3407     // Set GSR.align to (8-n) using alignaddr
  3408     // Circular byte shift store values by n places so that the original bytes are at correct position for stpartialf
  3409     // Set the arbitrarily aligned 'dest' address to nearest 8-byte aligned address
  3410     // Store (partial) the original first (8-n) bytes starting at the original 'dest' address
  3411     // Negate the edge mask so that the subsequent stpartialf can store the original (8-n-1)th through 8th bytes at appropriate address
  3412     // We need to execute this process for both the 8-byte result values
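    //
    // C-like restatement of the steps above (illustrative only):
    //
    //   n = to & 7;                                 // bytes of misalignment
    //   if (n == 0) { store lo at to, hi at to+8; done }
    //   mask = edge8n(to);                          // e.g. 00000001 when n == 7 (see example above)
    //   GSR.align = 8 - n;                          // set with alignaddr(8-n, 0)
    //   lo = faligndata(lo, lo);  hi = faligndata(hi, hi);   // rotate bytes into place
    //   base = to & ~7;
    //   stpartialf(base,       mask, lo);           // first 8-n bytes of lo
    //   stpartialf(base + 8,   mask, hi);
    //   stpartialf(base + 8,  ~mask, lo);           // remaining n bytes of lo
    //   stpartialf(base + 16, ~mask, hi);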
  3414     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
  3415     __ andcc(to, 7, O5);
  3416     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output);
  3417     __ delayed()->edge8n(to, G0, O3);
  3419     // aligned case: store output into the destination array
  3420     __ stf(FloatRegisterImpl::D, F54, to, 0);
  3421     __ retl();
  3422     __ delayed()->stf(FloatRegisterImpl::D, F56, to, 8);
  3424     __ BIND(L_store_misaligned_output);
  3425     __ add(to, 8, O4);
  3426     __ mov(8, O2);
  3427     __ sub(O2, O5, O2);
  3428     __ alignaddr(O2, G0, O2);
  3429     __ faligndata(F54, F54, F54);
  3430     __ faligndata(F56, F56, F56);
  3431     __ and3(to, -8, to);
  3432     __ and3(O4, -8, O4);
  3433     __ stpartialf(to, O3, F54, Assembler::ASI_PST8_PRIMARY);
  3434     __ stpartialf(O4, O3, F56, Assembler::ASI_PST8_PRIMARY);
  3435     __ add(to, 8, to);
  3436     __ add(O4, 8, O4);
  3437     __ orn(G0, O3, O3);
  3438     __ stpartialf(to, O3, F54, Assembler::ASI_PST8_PRIMARY);
  3439     __ retl();
  3440     __ delayed()->stpartialf(O4, O3, F56, Assembler::ASI_PST8_PRIMARY);
  3442     return start;
  3445   address generate_aescrypt_decryptBlock() {
  3446     assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0,
  3447            "the following code assumes that first element of an int array is aligned to 8 bytes");
  3448     // required since we read original key 'byte' array as well in the decryption stubs
  3449     assert((arrayOopDesc::base_offset_in_bytes(T_BYTE) & 7) == 0,
  3450            "the following code assumes that first element of a byte array is aligned to 8 bytes");
  3451     __ align(CodeEntryAlignment);
  3452     StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
  3453     address start = __ pc();
  3454     Label L_load_misaligned_input, L_load_original_key, L_expand192bit, L_expand256bit, L_reload_misaligned_input;
  3455     Label L_256bit_transform, L_common_transform, L_store_misaligned_output;
  3456     Register from = O0; // source byte array
  3457     Register to = O1;   // destination byte array
  3458     Register key = O2;  // expanded key array
  3459     Register original_key = O3;  // original key array only required during decryption
  3460     const Register keylen = O4;  // reg for storing expanded key array length
  3462     // read expanded key array length
  3463     __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
  3465     // save 'from' since we may need to recheck alignment in case of 256-bit decryption
  3466     __ mov(from, G1);
  3468     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
  3469     __ andcc(from, 7, G0);
  3470     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input);
  3471     __ delayed()->alignaddr(from, G0, from);
  3473     // aligned case: load input into F52-F54
  3474     __ ldf(FloatRegisterImpl::D, from, 0, F52);
  3475     __ ldf(FloatRegisterImpl::D, from, 8, F54);
  3476     __ ba_short(L_load_original_key);
  3478     __ BIND(L_load_misaligned_input);
  3479     __ ldf(FloatRegisterImpl::D, from, 0, F52);
  3480     __ ldf(FloatRegisterImpl::D, from, 8, F54);
  3481     __ ldf(FloatRegisterImpl::D, from, 16, F56);
  3482     __ faligndata(F52, F54, F52);
  3483     __ faligndata(F54, F56, F54);
  3485     __ BIND(L_load_original_key);
  3486     // load original key from SunJCE expanded decryption key
  3487     // Since we load original key buffer starting first element, 8-byte alignment is guaranteed
  3488     for ( int i = 0;  i <= 3; i++ ) {
  3489       __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
  3492     // 256-bit original key size
  3493     __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_expand256bit);
  3495     // 192-bit original key size
  3496     __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_expand192bit);
  3498     // 128-bit original key size
  3499     // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
  3500     for ( int i = 0;  i <= 36; i += 4 ) {
  3501       __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+2), i/4, as_FloatRegister(i+4));
  3502       __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+4), as_FloatRegister(i+6));
  3505     // perform 128-bit key specific inverse cipher transformation
  3506     __ fxor(FloatRegisterImpl::D, F42, F54, F54);
  3507     __ fxor(FloatRegisterImpl::D, F40, F52, F52);
  3508     __ ba_short(L_common_transform);
  3510     __ BIND(L_expand192bit);
  3512     // start loading rest of the 192-bit key
  3513     __ ldf(FloatRegisterImpl::S, original_key, 16, F4);
  3514     __ ldf(FloatRegisterImpl::S, original_key, 20, F5);
  3516     // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
  3517     for ( int i = 0;  i <= 36; i += 6 ) {
  3518       __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+4), i/6, as_FloatRegister(i+6));
  3519       __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+6), as_FloatRegister(i+8));
  3520       __ aes_kexpand2(as_FloatRegister(i+4), as_FloatRegister(i+8), as_FloatRegister(i+10));
  3522     __ aes_kexpand1(F42, F46, 7, F48);
  3523     __ aes_kexpand2(F44, F48, F50);
  3525     // perform 192-bit key specific inverse cipher transformation
  3526     __ fxor(FloatRegisterImpl::D, F50, F54, F54);
  3527     __ fxor(FloatRegisterImpl::D, F48, F52, F52);
  3528     __ aes_dround23(F46, F52, F54, F58);
  3529     __ aes_dround01(F44, F52, F54, F56);
  3530     __ aes_dround23(F42, F56, F58, F54);
  3531     __ aes_dround01(F40, F56, F58, F52);
  3532     __ ba_short(L_common_transform);
  3534     __ BIND(L_expand256bit);
  3536     // load rest of the 256-bit key
  3537     for ( int i = 4;  i <= 7; i++ ) {
  3538       __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
  3541     // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
  3542     for ( int i = 0;  i <= 40; i += 8 ) {
  3543       __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+6), i/8, as_FloatRegister(i+8));
  3544       __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+8), as_FloatRegister(i+10));
  3545       __ aes_kexpand0(as_FloatRegister(i+4), as_FloatRegister(i+10), as_FloatRegister(i+12));
  3546       __ aes_kexpand2(as_FloatRegister(i+6), as_FloatRegister(i+12), as_FloatRegister(i+14));
  3548     __ aes_kexpand1(F48, F54, 6, F56);
  3549     __ aes_kexpand2(F50, F56, F58);
  3551     for ( int i = 0;  i <= 6; i += 2 ) {
  3552       __ fsrc2(FloatRegisterImpl::D, as_FloatRegister(58-i), as_FloatRegister(i));
  3555     // reload original 'from' address
  3556     __ mov(G1, from);
  3558     // re-check 8-byte alignment
  3559     __ andcc(from, 7, G0);
  3560     __ br(Assembler::notZero, true, Assembler::pn, L_reload_misaligned_input);
  3561     __ delayed()->alignaddr(from, G0, from);
  3563     // aligned case: load input into F52-F54
  3564     __ ldf(FloatRegisterImpl::D, from, 0, F52);
  3565     __ ldf(FloatRegisterImpl::D, from, 8, F54);
  3566     __ ba_short(L_256bit_transform);
  3568     __ BIND(L_reload_misaligned_input);
  3569     __ ldf(FloatRegisterImpl::D, from, 0, F52);
  3570     __ ldf(FloatRegisterImpl::D, from, 8, F54);
  3571     __ ldf(FloatRegisterImpl::D, from, 16, F56);
  3572     __ faligndata(F52, F54, F52);
  3573     __ faligndata(F54, F56, F54);
  3575     // perform 256-bit key specific inverse cipher transformation
  3576     __ BIND(L_256bit_transform);
  3577     __ fxor(FloatRegisterImpl::D, F0, F54, F54);
  3578     __ fxor(FloatRegisterImpl::D, F2, F52, F52);
  3579     __ aes_dround23(F4, F52, F54, F58);
  3580     __ aes_dround01(F6, F52, F54, F56);
  3581     __ aes_dround23(F50, F56, F58, F54);
  3582     __ aes_dround01(F48, F56, F58, F52);
  3583     __ aes_dround23(F46, F52, F54, F58);
  3584     __ aes_dround01(F44, F52, F54, F56);
  3585     __ aes_dround23(F42, F56, F58, F54);
  3586     __ aes_dround01(F40, F56, F58, F52);
  3588     for ( int i = 0;  i <= 7; i++ ) {
  3589       __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
  3592     // perform inverse cipher transformations common for all key sizes
  3593     __ BIND(L_common_transform);
  3594     for ( int i = 38;  i >= 6; i -= 8 ) {
  3595       __ aes_dround23(as_FloatRegister(i), F52, F54, F58);
  3596       __ aes_dround01(as_FloatRegister(i-2), F52, F54, F56);
  3597       if ( i != 6) {
  3598         __ aes_dround23(as_FloatRegister(i-4), F56, F58, F54);
  3599         __ aes_dround01(as_FloatRegister(i-6), F56, F58, F52);
  3600       } else {
  3601         __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F54);
  3602         __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F52);
  3606     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
  3607     __ andcc(to, 7, O5);
  3608     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output);
  3609     __ delayed()->edge8n(to, G0, O3);
  3611     // aligned case: store output into the destination array
  3612     __ stf(FloatRegisterImpl::D, F52, to, 0);
  3613     __ retl();
  3614     __ delayed()->stf(FloatRegisterImpl::D, F54, to, 8);
  3616     __ BIND(L_store_misaligned_output);
  3617     __ add(to, 8, O4);
  3618     __ mov(8, O2);
  3619     __ sub(O2, O5, O2);
  3620     __ alignaddr(O2, G0, O2);
  3621     __ faligndata(F52, F52, F52);
  3622     __ faligndata(F54, F54, F54);
  3623     __ and3(to, -8, to);
  3624     __ and3(O4, -8, O4);
  3625     __ stpartialf(to, O3, F52, Assembler::ASI_PST8_PRIMARY);
  3626     __ stpartialf(O4, O3, F54, Assembler::ASI_PST8_PRIMARY);
  3627     __ add(to, 8, to);
  3628     __ add(O4, 8, O4);
  3629     __ orn(G0, O3, O3);
  3630     __ stpartialf(to, O3, F52, Assembler::ASI_PST8_PRIMARY);
  3631     __ retl();
  3632     __ delayed()->stpartialf(O4, O3, F54, Assembler::ASI_PST8_PRIMARY);
  3634     return start;
  3637   address generate_cipherBlockChaining_encryptAESCrypt() {
  3638     assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0,
  3639            "the following code assumes that first element of an int array is aligned to 8 bytes");
  3640     assert((arrayOopDesc::base_offset_in_bytes(T_BYTE) & 7) == 0,
  3641            "the following code assumes that first element of a byte array is aligned to 8 bytes");
  3642     __ align(CodeEntryAlignment);
  3643     StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
  3644     Label L_cbcenc128, L_load_misaligned_input_128bit, L_128bit_transform, L_store_misaligned_output_128bit;
  3645     Label L_check_loop_end_128bit, L_cbcenc192, L_load_misaligned_input_192bit, L_192bit_transform;
  3646     Label L_store_misaligned_output_192bit, L_check_loop_end_192bit, L_cbcenc256, L_load_misaligned_input_256bit;
  3647     Label L_256bit_transform, L_store_misaligned_output_256bit, L_check_loop_end_256bit;
  3648     address start = __ pc();
  3649     Register from = I0; // source byte array
  3650     Register to = I1;   // destination byte array
  3651     Register key = I2;  // expanded key array
  3652     Register rvec = I3; // init vector
  3653     const Register len_reg = I4; // cipher length
  3654     const Register keylen = I5;  // reg for storing expanded key array length
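    // CBC encryption in C-like form (illustrative only; E() is one AES block
    // encryption under the expanded key, and iv starts as the contents of rvec):
    //
    //   for (i = 0; i < len; i += 16) {
    //     block = load16(from + i) ^ iv;
    //     iv    = E(block);
    //     store16(to + i, iv);
    //   }
    //   store16(rvec, iv);        // chain continues on the next call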
  3656     __ save_frame(0);
  3657     // save cipher len to return in the end
  3658     __ mov(len_reg, L0);
  3660     // read expanded key length
  3661     __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
  3663     // load initial vector, 8-byte alignment is guaranteed
  3664     __ ldf(FloatRegisterImpl::D, rvec, 0, F60);
  3665     __ ldf(FloatRegisterImpl::D, rvec, 8, F62);
  3666     // load key, 8-byte alignment is guaranteed
  3667     __ ldx(key,0,G1);
  3668     __ ldx(key,8,G5);
  3670     // start loading expanded key, 8-byte alignment is guaranteed
  3671     for ( int i = 0, j = 16;  i <= 38; i += 2, j += 8 ) {
  3672       __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
  3675     // 128-bit original key size
  3676     __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pt, L_cbcenc128);
  3678     for ( int i = 40, j = 176;  i <= 46; i += 2, j += 8 ) {
  3679       __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
  3682     // 192-bit original key size
  3683     __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pt, L_cbcenc192);
  3685     for ( int i = 48, j = 208;  i <= 54; i += 2, j += 8 ) {
  3686       __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
  3689     // 256-bit original key size
  3690     __ ba_short(L_cbcenc256);
  3692     __ align(OptoLoopAlignment);
  3693     __ BIND(L_cbcenc128);
  3694     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
  3695     __ andcc(from, 7, G0);
  3696     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_128bit);
  3697     __ delayed()->mov(from, L1); // save original 'from' address before alignaddr
  3699     // aligned case: load input into G3 and G4
  3700     __ ldx(from,0,G3);
  3701     __ ldx(from,8,G4);
  3702     __ ba_short(L_128bit_transform);
  3704     __ BIND(L_load_misaligned_input_128bit);
  3705     // can clobber F48, F50 and F52 as they are not used in 128 and 192-bit key encryption
  3706     __ alignaddr(from, G0, from);
  3707     __ ldf(FloatRegisterImpl::D, from, 0, F48);
  3708     __ ldf(FloatRegisterImpl::D, from, 8, F50);
  3709     __ ldf(FloatRegisterImpl::D, from, 16, F52);
  3710     __ faligndata(F48, F50, F48);
  3711     __ faligndata(F50, F52, F50);
  3712     __ movdtox(F48, G3);
  3713     __ movdtox(F50, G4);
  3714     __ mov(L1, from);
  3716     __ BIND(L_128bit_transform);
  3717     __ xor3(G1,G3,G3);
  3718     __ xor3(G5,G4,G4);
  3719     __ movxtod(G3,F56);
  3720     __ movxtod(G4,F58);
  3721     __ fxor(FloatRegisterImpl::D, F60, F56, F60);
  3722     __ fxor(FloatRegisterImpl::D, F62, F58, F62);
  3724     // TEN_EROUNDS
  3725     for ( int i = 0;  i <= 32; i += 8 ) {
  3726       __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
  3727       __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
  3728       if (i != 32 ) {
  3729         __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
  3730         __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
  3731       } else {
  3732         __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
  3733         __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
  3737     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
  3738     __ andcc(to, 7, L1);
  3739     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_128bit);
  3740     __ delayed()->edge8n(to, G0, L2);
  3742     // aligned case: store output into the destination array
  3743     __ stf(FloatRegisterImpl::D, F60, to, 0);
  3744     __ stf(FloatRegisterImpl::D, F62, to, 8);
  3745     __ ba_short(L_check_loop_end_128bit);
  3747     __ BIND(L_store_misaligned_output_128bit);
  3748     __ add(to, 8, L3);
  3749     __ mov(8, L4);
  3750     __ sub(L4, L1, L4);
  3751     __ alignaddr(L4, G0, L4);
  3752     // save cipher text before circular right shift
  3753     // as it needs to be stored as iv for next block (see code before next retl)
  3754     __ movdtox(F60, L6);
  3755     __ movdtox(F62, L7);
  3756     __ faligndata(F60, F60, F60);
  3757     __ faligndata(F62, F62, F62);
  3758     __ mov(to, L5);
  3759     __ and3(to, -8, to);
  3760     __ and3(L3, -8, L3);
  3761     __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
  3762     __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
  3763     __ add(to, 8, to);
  3764     __ add(L3, 8, L3);
  3765     __ orn(G0, L2, L2);
  3766     __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
  3767     __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
  3768     __ mov(L5, to);
  3769     __ movxtod(L6, F60);
  3770     __ movxtod(L7, F62);
  3772     __ BIND(L_check_loop_end_128bit);
  3773     __ add(from, 16, from);
  3774     __ add(to, 16, to);
  3775     __ subcc(len_reg, 16, len_reg);
  3776     __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc128);
  3777     __ delayed()->nop();
  3778     // re-init initial vector for next block, 8-byte alignment is guaranteed
  3779     __ stf(FloatRegisterImpl::D, F60, rvec, 0);
  3780     __ stf(FloatRegisterImpl::D, F62, rvec, 8);
  3781     __ mov(L0, I0);
  3782     __ ret();
  3783     __ delayed()->restore();
  3785     __ align(OptoLoopAlignment);
  3786     __ BIND(L_cbcenc192);
  3787     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
  3788     __ andcc(from, 7, G0);
  3789     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_192bit);
  3790     __ delayed()->mov(from, L1); // save original 'from' address before alignaddr
  3792     // aligned case: load input into G3 and G4
  3793     __ ldx(from,0,G3);
  3794     __ ldx(from,8,G4);
  3795     __ ba_short(L_192bit_transform);
  3797     __ BIND(L_load_misaligned_input_192bit);
  3798     // can clobber F48, F50 and F52 as they are not used in 128 and 192-bit key encryption
  3799     __ alignaddr(from, G0, from);
  3800     __ ldf(FloatRegisterImpl::D, from, 0, F48);
  3801     __ ldf(FloatRegisterImpl::D, from, 8, F50);
  3802     __ ldf(FloatRegisterImpl::D, from, 16, F52);
  3803     __ faligndata(F48, F50, F48);
  3804     __ faligndata(F50, F52, F50);
  3805     __ movdtox(F48, G3);
  3806     __ movdtox(F50, G4);
  3807     __ mov(L1, from);
  3809     __ BIND(L_192bit_transform);
  3810     __ xor3(G1,G3,G3);
  3811     __ xor3(G5,G4,G4);
  3812     __ movxtod(G3,F56);
  3813     __ movxtod(G4,F58);
  3814     __ fxor(FloatRegisterImpl::D, F60, F56, F60);
  3815     __ fxor(FloatRegisterImpl::D, F62, F58, F62);
  3817     // TWELVE_EROUNDS
  3818     for ( int i = 0;  i <= 40; i += 8 ) {
  3819       __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
  3820       __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
  3821       if (i != 40 ) {
  3822         __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
  3823         __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
  3824       } else {
  3825         __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
  3826         __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
  3830     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
  3831     __ andcc(to, 7, L1);
  3832     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_192bit);
  3833     __ delayed()->edge8n(to, G0, L2);
  3835     // aligned case: store output into the destination array
  3836     __ stf(FloatRegisterImpl::D, F60, to, 0);
  3837     __ stf(FloatRegisterImpl::D, F62, to, 8);
  3838     __ ba_short(L_check_loop_end_192bit);
  3840     __ BIND(L_store_misaligned_output_192bit);
  3841     __ add(to, 8, L3);
  3842     __ mov(8, L4);
  3843     __ sub(L4, L1, L4);
  3844     __ alignaddr(L4, G0, L4);
  3845     __ movdtox(F60, L6);
  3846     __ movdtox(F62, L7);
  3847     __ faligndata(F60, F60, F60);
  3848     __ faligndata(F62, F62, F62);
  3849     __ mov(to, L5);
  3850     __ and3(to, -8, to);
  3851     __ and3(L3, -8, L3);
  3852     __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
  3853     __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
  3854     __ add(to, 8, to);
  3855     __ add(L3, 8, L3);
  3856     __ orn(G0, L2, L2);
  3857     __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
  3858     __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
  3859     __ mov(L5, to);
  3860     __ movxtod(L6, F60);
  3861     __ movxtod(L7, F62);
  3863     __ BIND(L_check_loop_end_192bit);
  3864     __ add(from, 16, from);
  3865     __ subcc(len_reg, 16, len_reg);
  3866     __ add(to, 16, to);
  3867     __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc192);
  3868     __ delayed()->nop();
  3869     // re-init initial vector for next block, 8-byte alignment is guaranteed
  3870     __ stf(FloatRegisterImpl::D, F60, rvec, 0);
  3871     __ stf(FloatRegisterImpl::D, F62, rvec, 8);
  3872     __ mov(L0, I0);
  3873     __ ret();
  3874     __ delayed()->restore();
  3876     __ align(OptoLoopAlignment);
  3877     __ BIND(L_cbcenc256);
  3878     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
  3879     __ andcc(from, 7, G0);
  3880     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_256bit);
  3881     __ delayed()->mov(from, L1); // save original 'from' address before alignaddr
  3883     // aligned case: load input into G3 and G4
  3884     __ ldx(from,0,G3);
  3885     __ ldx(from,8,G4);
  3886     __ ba_short(L_256bit_transform);
  3888     __ BIND(L_load_misaligned_input_256bit);
  3889     // cannot clobber F48, F50 and F52. F56, F58 can be used though
  3890     __ alignaddr(from, G0, from);
  3891     __ movdtox(F60, L2); // save F60 before overwriting
  3892     __ ldf(FloatRegisterImpl::D, from, 0, F56);
  3893     __ ldf(FloatRegisterImpl::D, from, 8, F58);
  3894     __ ldf(FloatRegisterImpl::D, from, 16, F60);
  3895     __ faligndata(F56, F58, F56);
  3896     __ faligndata(F58, F60, F58);
  3897     __ movdtox(F56, G3);
  3898     __ movdtox(F58, G4);
  3899     __ mov(L1, from);
  3900     __ movxtod(L2, F60);
  3902     __ BIND(L_256bit_transform);
  3903     __ xor3(G1,G3,G3);
  3904     __ xor3(G5,G4,G4);
  3905     __ movxtod(G3,F56);
  3906     __ movxtod(G4,F58);
  3907     __ fxor(FloatRegisterImpl::D, F60, F56, F60);
  3908     __ fxor(FloatRegisterImpl::D, F62, F58, F62);
  3910     // FOURTEEN_EROUNDS
  3911     for ( int i = 0;  i <= 48; i += 8 ) {
  3912       __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
  3913       __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
  3914       if (i != 48 ) {
  3915         __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
  3916         __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
  3917       } else {
  3918         __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
  3919         __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
  3923     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
  3924     __ andcc(to, 7, L1);
  3925     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_256bit);
  3926     __ delayed()->edge8n(to, G0, L2);
  3928     // aligned case: store output into the destination array
  3929     __ stf(FloatRegisterImpl::D, F60, to, 0);
  3930     __ stf(FloatRegisterImpl::D, F62, to, 8);
  3931     __ ba_short(L_check_loop_end_256bit);
  3933     __ BIND(L_store_misaligned_output_256bit);
  3934     __ add(to, 8, L3);
  3935     __ mov(8, L4);
  3936     __ sub(L4, L1, L4);
  3937     __ alignaddr(L4, G0, L4);
  3938     __ movdtox(F60, L6);
  3939     __ movdtox(F62, L7);
  3940     __ faligndata(F60, F60, F60);
  3941     __ faligndata(F62, F62, F62);
  3942     __ mov(to, L5);
  3943     __ and3(to, -8, to);
  3944     __ and3(L3, -8, L3);
  3945     __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
  3946     __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
  3947     __ add(to, 8, to);
  3948     __ add(L3, 8, L3);
  3949     __ orn(G0, L2, L2);
  3950     __ stpartialf(to, L2, F60, Assembler::ASI_PST8_PRIMARY);
  3951     __ stpartialf(L3, L2, F62, Assembler::ASI_PST8_PRIMARY);
  3952     __ mov(L5, to);
  3953     __ movxtod(L6, F60);
  3954     __ movxtod(L7, F62);
  3956     __ BIND(L_check_loop_end_256bit);
  3957     __ add(from, 16, from);
  3958     __ subcc(len_reg, 16, len_reg);
  3959     __ add(to, 16, to);
  3960     __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc256);
  3961     __ delayed()->nop();
  3962     // re-init initial vector for next block, 8-byte alignment is guaranteed
  3963     __ stf(FloatRegisterImpl::D, F60, rvec, 0);
  3964     __ stf(FloatRegisterImpl::D, F62, rvec, 8);
  3965     __ mov(L0, I0);
  3966     __ ret();
  3967     __ delayed()->restore();
  3969     return start;
  3972   address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
  3973     assert((arrayOopDesc::base_offset_in_bytes(T_INT) & 7) == 0,
  3974            "the following code assumes that first element of an int array is aligned to 8 bytes");
  3975     assert((arrayOopDesc::base_offset_in_bytes(T_BYTE) & 7) == 0,
  3976            "the following code assumes that first element of a byte array is aligned to 8 bytes");
  3977     __ align(CodeEntryAlignment);
  3978     StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
  3979     Label L_cbcdec_end, L_expand192bit, L_expand256bit, L_dec_first_block_start;
  3980     Label L_dec_first_block128, L_dec_first_block192, L_dec_next2_blocks128, L_dec_next2_blocks192, L_dec_next2_blocks256;
  3981     Label L_load_misaligned_input_first_block, L_transform_first_block, L_load_misaligned_next2_blocks128, L_transform_next2_blocks128;
  3982     Label L_load_misaligned_next2_blocks192, L_transform_next2_blocks192, L_load_misaligned_next2_blocks256, L_transform_next2_blocks256;
  3983     Label L_store_misaligned_output_first_block, L_check_decrypt_end, L_store_misaligned_output_next2_blocks128;
  3984     Label L_check_decrypt_loop_end128, L_store_misaligned_output_next2_blocks192, L_check_decrypt_loop_end192;
  3985     Label L_store_misaligned_output_next2_blocks256, L_check_decrypt_loop_end256;
  3986     address start = __ pc();
  3987     Register from = I0; // source byte array
  3988     Register to = I1;   // destination byte array
  3989     Register key = I2;  // expanded key array
  3990     Register rvec = I3; // init vector
  3991     const Register len_reg = I4; // cipher length
  3992     const Register original_key = I5;  // original key array only required during decryption
  3993     const Register keylen = L6;  // reg for storing expanded key array length
  3995     __ save_frame(0); // args are read from the I* registers since we save the frame at the beginning
  3996     // save cipher len to return in the end
  3997     __ mov(len_reg, L7);
  3999     // load original key from SunJCE expanded decryption key
  4000     // Since we load the original key buffer starting at its first element, 8-byte alignment is guaranteed
  4001     for ( int i = 0;  i <= 3; i++ ) {
  4002       __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
  4003     }
  4005     // load initial vector, 8-byte alignment is guaranteed
  4006     __ ldx(rvec,0,L0);
  4007     __ ldx(rvec,8,L1);
  4009     // read expanded key array length
  4010     __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
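           // keylen is the expanded key length in 32-bit words: 4 * (rounds + 1), i.e.
           // 44, 52 or 60 words for 128-, 192- and 256-bit keys respectively.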
  4012     // 256-bit original key size
  4013     __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_expand256bit);
  4015     // 192-bit original key size
  4016     __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_expand192bit);
  4018     // 128-bit original key size
  4019     // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
  4020     for ( int i = 0;  i <= 36; i += 4 ) {
  4021       __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+2), i/4, as_FloatRegister(i+4));
  4022       __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+4), as_FloatRegister(i+6));
  4023     }
  4025     // load expanded key[last-1] and key[last] elements
  4026     __ movdtox(F40,L2);
  4027     __ movdtox(F42,L3);
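           // If the length is an even number of 16-byte blocks, go straight to the
           // two-blocks-per-iteration loop; otherwise decrypt a single block first.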
  4029     __ and3(len_reg, 16, L4);
  4030     __ br_null_short(L4, Assembler::pt, L_dec_next2_blocks128);
  4031     __ nop();
  4033     __ ba_short(L_dec_first_block_start);
  4035     __ BIND(L_expand192bit);
  4036     // load rest of the 192-bit key
  4037     __ ldf(FloatRegisterImpl::S, original_key, 16, F4);
  4038     __ ldf(FloatRegisterImpl::S, original_key, 20, F5);
  4040     // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
  4041     for ( int i = 0;  i <= 36; i += 6 ) {
  4042       __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+4), i/6, as_FloatRegister(i+6));
  4043       __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+6), as_FloatRegister(i+8));
  4044       __ aes_kexpand2(as_FloatRegister(i+4), as_FloatRegister(i+8), as_FloatRegister(i+10));
  4045     }
  4046     __ aes_kexpand1(F42, F46, 7, F48);
  4047     __ aes_kexpand2(F44, F48, F50);
  4049     // load expanded key[last-1] and key[last] elements
  4050     __ movdtox(F48,L2);
  4051     __ movdtox(F50,L3);
  4053     __ and3(len_reg, 16, L4);
  4054     __ br_null_short(L4, Assembler::pt, L_dec_next2_blocks192);
  4055     __ nop();
  4057     __ ba_short(L_dec_first_block_start);
  4059     __ BIND(L_expand256bit);
  4060     // load rest of the 256-bit key
  4061     for ( int i = 4;  i <= 7; i++ ) {
  4062       __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
  4063     }
  4065     // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
  4066     for ( int i = 0;  i <= 40; i += 8 ) {
  4067       __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+6), i/8, as_FloatRegister(i+8));
  4068       __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+8), as_FloatRegister(i+10));
  4069       __ aes_kexpand0(as_FloatRegister(i+4), as_FloatRegister(i+10), as_FloatRegister(i+12));
  4070       __ aes_kexpand2(as_FloatRegister(i+6), as_FloatRegister(i+12), as_FloatRegister(i+14));
  4071     }
  4072     __ aes_kexpand1(F48, F54, 6, F56);
  4073     __ aes_kexpand2(F50, F56, F58);
  4075     // load expanded key[last-1] and key[last] elements
  4076     __ movdtox(F56,L2);
  4077     __ movdtox(F58,L3);
  4079     __ and3(len_reg, 16, L4);
  4080     __ br_null_short(L4, Assembler::pt, L_dec_next2_blocks256);
  4082     __ BIND(L_dec_first_block_start);
  4083     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
  4084     __ andcc(from, 7, G0);
  4085     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_input_first_block);
  4086     __ delayed()->mov(from, G1); // save original 'from' address before alignaddr
  4088     // aligned case: load input into L4 and L5
  4089     __ ldx(from,0,L4);
  4090     __ ldx(from,8,L5);
  4091     __ ba_short(L_transform_first_block);
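           // Misaligned input below: alignaddr rounds 'from' down to an 8-byte boundary and
           // records the misalignment in %gsr; three consecutive doublewords are then loaded
           // and faligndata extracts the 16 requested bytes from the 24 loaded.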
  4093     __ BIND(L_load_misaligned_input_first_block);
  4094     __ alignaddr(from, G0, from);
  4095     // F58, F60, F62 can be clobbered
  4096     __ ldf(FloatRegisterImpl::D, from, 0, F58);
  4097     __ ldf(FloatRegisterImpl::D, from, 8, F60);
  4098     __ ldf(FloatRegisterImpl::D, from, 16, F62);
  4099     __ faligndata(F58, F60, F58);
  4100     __ faligndata(F60, F62, F60);
  4101     __ movdtox(F58, L4);
  4102     __ movdtox(F60, L5);
  4103     __ mov(G1, from);
  4105     __ BIND(L_transform_first_block);
  4106     __ xor3(L2,L4,G1);
  4107     __ movxtod(G1,F60);
  4108     __ xor3(L3,L5,G1);
  4109     __ movxtod(G1,F62);
  4111     // 128-bit original key size
  4112     __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pn, L_dec_first_block128);
  4114     // 192-bit original key size
  4115     __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_dec_first_block192);
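           // The round count depends on the key size: a 256-bit key falls through and runs all
           // 14 rounds, while 192- and 128-bit keys branch ahead and run 12 or 10 rounds.  The
           // round keys are applied in reverse order, finishing with the first words of the
           // schedule via the aes_dround*_l (last-round) forms.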
  4117     __ aes_dround23(F54, F60, F62, F58);
  4118     __ aes_dround01(F52, F60, F62, F56);
  4119     __ aes_dround23(F50, F56, F58, F62);
  4120     __ aes_dround01(F48, F56, F58, F60);
  4122     __ BIND(L_dec_first_block192);
  4123     __ aes_dround23(F46, F60, F62, F58);
  4124     __ aes_dround01(F44, F60, F62, F56);
  4125     __ aes_dround23(F42, F56, F58, F62);
  4126     __ aes_dround01(F40, F56, F58, F60);
  4128     __ BIND(L_dec_first_block128);
  4129     for ( int i = 38;  i >= 6; i -= 8 ) {
  4130       __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
  4131       __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
  4132       if ( i != 6) {
  4133         __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
  4134         __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
  4135       } else {
  4136         __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62);
  4137         __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60);
  4138       }
  4139     }
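           // CBC chaining: xor the decrypted block with the previous ciphertext block (the IV,
           // kept in L0:L1), then remember the current ciphertext block (saved in L4:L5) as
           // the IV for the next block.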
  4141     __ movxtod(L0,F56);
  4142     __ movxtod(L1,F58);
  4143     __ mov(L4,L0);
  4144     __ mov(L5,L1);
  4145     __ fxor(FloatRegisterImpl::D, F56, F60, F60);
  4146     __ fxor(FloatRegisterImpl::D, F58, F62, F62);
  4148     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
  4149     __ andcc(to, 7, G1);
  4150     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_first_block);
  4151     __ delayed()->edge8n(to, G0, G2);
  4153     // aligned case: store output into the destination array
  4154     __ stf(FloatRegisterImpl::D, F60, to, 0);
  4155     __ stf(FloatRegisterImpl::D, F62, to, 8);
  4156     __ ba_short(L_check_decrypt_end);
  4158     __ BIND(L_store_misaligned_output_first_block);
  4159     __ add(to, 8, G3);
  4160     __ mov(8, G4);
  4161     __ sub(G4, G1, G4);
  4162     __ alignaddr(G4, G0, G4);
  4163     __ faligndata(F60, F60, F60);
  4164     __ faligndata(F62, F62, F62);
  4165     __ mov(to, G1);
  4166     __ and3(to, -8, to);
  4167     __ and3(G3, -8, G3);
  4168     __ stpartialf(to, G2, F60, Assembler::ASI_PST8_PRIMARY);
  4169     __ stpartialf(G3, G2, F62, Assembler::ASI_PST8_PRIMARY);
  4170     __ add(to, 8, to);
  4171     __ add(G3, 8, G3);
  4172     __ orn(G0, G2, G2);
  4173     __ stpartialf(to, G2, F60, Assembler::ASI_PST8_PRIMARY);
  4174     __ stpartialf(G3, G2, F62, Assembler::ASI_PST8_PRIMARY);
  4175     __ mov(G1, to);
  4177     __ BIND(L_check_decrypt_end);
  4178     __ add(from, 16, from);
  4179     __ add(to, 16, to);
  4180     __ subcc(len_reg, 16, len_reg);
  4181     __ br(Assembler::equal, false, Assembler::pt, L_cbcdec_end);
  4182     __ delayed()->nop();
  4184     // 256-bit original key size
  4185     __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_dec_next2_blocks256);
  4187     // 192-bit original key size
  4188     __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_dec_next2_blocks192);
  4190     __ align(OptoLoopAlignment);
  4191     __ BIND(L_dec_next2_blocks128);
  4192     __ nop();
  4194     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
  4195     __ andcc(from, 7, G0);
  4196     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_next2_blocks128);
  4197     __ delayed()->mov(from, G1); // save original 'from' address before alignaddr
  4199     // aligned case: load input into G4, G5, L4 and L5
  4200     __ ldx(from,0,G4);
  4201     __ ldx(from,8,G5);
  4202     __ ldx(from,16,L4);
  4203     __ ldx(from,24,L5);
  4204     __ ba_short(L_transform_next2_blocks128);
  4206     __ BIND(L_load_misaligned_next2_blocks128);
  4207     __ alignaddr(from, G0, from);
  4208     // F40, F42, F58, F60, F62 can be clobbered
  4209     __ ldf(FloatRegisterImpl::D, from, 0, F40);
  4210     __ ldf(FloatRegisterImpl::D, from, 8, F42);
  4211     __ ldf(FloatRegisterImpl::D, from, 16, F60);
  4212     __ ldf(FloatRegisterImpl::D, from, 24, F62);
  4213     __ ldf(FloatRegisterImpl::D, from, 32, F58);
  4214     __ faligndata(F40, F42, F40);
  4215     __ faligndata(F42, F60, F42);
  4216     __ faligndata(F60, F62, F60);
  4217     __ faligndata(F62, F58, F62);
  4218     __ movdtox(F40, G4);
  4219     __ movdtox(F42, G5);
  4220     __ movdtox(F60, L4);
  4221     __ movdtox(F62, L5);
  4222     __ mov(G1, from);
  4224     __ BIND(L_transform_next2_blocks128);
  4225     // F40:F42 used for first 16 bytes
  4226     __ xor3(L2,G4,G1);
  4227     __ movxtod(G1,F40);
  4228     __ xor3(L3,G5,G1);
  4229     __ movxtod(G1,F42);
  4231     // F60:F62 used for next 16 bytes
  4232     __ xor3(L2,L4,G1);
  4233     __ movxtod(G1,F60);
  4234     __ xor3(L3,L5,G1);
  4235     __ movxtod(G1,F62);
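           // The loop below interleaves the rounds of the two independent blocks (F40:F42 and
           // F60:F62, with F44/F46 and F56/F58 as round temporaries) so that the two streams
           // can overlap in the pipeline.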
  4237     for ( int i = 38;  i >= 6; i -= 8 ) {
  4238       __ aes_dround23(as_FloatRegister(i), F40, F42, F44);
  4239       __ aes_dround01(as_FloatRegister(i-2), F40, F42, F46);
  4240       __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
  4241       __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
  4242       if (i != 6 ) {
  4243         __ aes_dround23(as_FloatRegister(i-4), F46, F44, F42);
  4244         __ aes_dround01(as_FloatRegister(i-6), F46, F44, F40);
  4245         __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
  4246         __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
  4247       } else {
  4248         __ aes_dround23_l(as_FloatRegister(i-4), F46, F44, F42);
  4249         __ aes_dround01_l(as_FloatRegister(i-6), F46, F44, F40);
  4250         __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62);
  4251         __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60);
  4252       }
  4253     }
  4255     __ movxtod(L0,F46);
  4256     __ movxtod(L1,F44);
  4257     __ fxor(FloatRegisterImpl::D, F46, F40, F40);
  4258     __ fxor(FloatRegisterImpl::D, F44, F42, F42);
  4260     __ movxtod(G4,F56);
  4261     __ movxtod(G5,F58);
  4262     __ mov(L4,L0);
  4263     __ mov(L5,L1);
  4264     __ fxor(FloatRegisterImpl::D, F56, F60, F60);
  4265     __ fxor(FloatRegisterImpl::D, F58, F62, F62);
  4267     // For a misaligned store of the 32 bytes of result we can:
  4268     // circular right-shift all 4 FP registers so that the 'head' and 'tail'
  4269     // parts that must be stored at the misaligned address end up in one FP reg;
  4270     // the other 3 FP regs can then be stored with regular 8-byte stores,
  4271     // and the edge-mask + partial-store mechanism stores the 'head' and 'tail' parts.
  4273     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
  4274     __ andcc(to, 7, G1);
  4275     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_next2_blocks128);
  4276     __ delayed()->edge8n(to, G0, G2);
  4278     // aligned case: store output into the destination array
  4279     __ stf(FloatRegisterImpl::D, F40, to, 0);
  4280     __ stf(FloatRegisterImpl::D, F42, to, 8);
  4281     __ stf(FloatRegisterImpl::D, F60, to, 16);
  4282     __ stf(FloatRegisterImpl::D, F62, to, 24);
  4283     __ ba_short(L_check_decrypt_loop_end128);
  4285     __ BIND(L_store_misaligned_output_next2_blocks128);
  4286     __ mov(8, G4);
  4287     __ sub(G4, G1, G4);
  4288     __ alignaddr(G4, G0, G4);
  4289     __ faligndata(F40, F42, F56); // F56 can be clobbered
  4290     __ faligndata(F42, F60, F42);
  4291     __ faligndata(F60, F62, F60);
  4292     __ faligndata(F62, F40, F40);
  4293     __ mov(to, G1);
  4294     __ and3(to, -8, to);
  4295     __ stpartialf(to, G2, F40, Assembler::ASI_PST8_PRIMARY);
  4296     __ stf(FloatRegisterImpl::D, F56, to, 8);
  4297     __ stf(FloatRegisterImpl::D, F42, to, 16);
  4298     __ stf(FloatRegisterImpl::D, F60, to, 24);
  4299     __ add(to, 32, to);
  4300     __ orn(G0, G2, G2);
  4301     __ stpartialf(to, G2, F40, Assembler::ASI_PST8_PRIMARY);
  4302     __ mov(G1, to);
  4304     __ BIND(L_check_decrypt_loop_end128);
  4305     __ add(from, 32, from);
  4306     __ add(to, 32, to);
  4307     __ subcc(len_reg, 32, len_reg);
  4308     __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks128);
  4309     __ delayed()->nop();
  4310     __ ba_short(L_cbcdec_end);
  4312     __ align(OptoLoopAlignment);
  4313     __ BIND(L_dec_next2_blocks192);
  4314     __ nop();
  4316     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
  4317     __ andcc(from, 7, G0);
  4318     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_next2_blocks192);
  4319     __ delayed()->mov(from, G1); // save original 'from' address before alignaddr
  4321     // aligned case: load input into G4, G5, L4 and L5
  4322     __ ldx(from,0,G4);
  4323     __ ldx(from,8,G5);
  4324     __ ldx(from,16,L4);
  4325     __ ldx(from,24,L5);
  4326     __ ba_short(L_transform_next2_blocks192);
  4328     __ BIND(L_load_misaligned_next2_blocks192);
  4329     __ alignaddr(from, G0, from);
  4330     // F48, F50, F52, F60, F62 can be clobbered
  4331     __ ldf(FloatRegisterImpl::D, from, 0, F48);
  4332     __ ldf(FloatRegisterImpl::D, from, 8, F50);
  4333     __ ldf(FloatRegisterImpl::D, from, 16, F60);
  4334     __ ldf(FloatRegisterImpl::D, from, 24, F62);
  4335     __ ldf(FloatRegisterImpl::D, from, 32, F52);
  4336     __ faligndata(F48, F50, F48);
  4337     __ faligndata(F50, F60, F50);
  4338     __ faligndata(F60, F62, F60);
  4339     __ faligndata(F62, F52, F62);
  4340     __ movdtox(F48, G4);
  4341     __ movdtox(F50, G5);
  4342     __ movdtox(F60, L4);
  4343     __ movdtox(F62, L5);
  4344     __ mov(G1, from);
  4346     __ BIND(L_transform_next2_blocks192);
  4347     // F48:F50 used for first 16 bytes
  4348     __ xor3(L2,G4,G1);
  4349     __ movxtod(G1,F48);
  4350     __ xor3(L3,G5,G1);
  4351     __ movxtod(G1,F50);
  4353     // F60:F62 used for next 16 bytes
  4354     __ xor3(L2,L4,G1);
  4355     __ movxtod(G1,F60);
  4356     __ xor3(L3,L5,G1);
  4357     __ movxtod(G1,F62);
  4359     for ( int i = 46;  i >= 6; i -= 8 ) {
  4360       __ aes_dround23(as_FloatRegister(i), F48, F50, F52);
  4361       __ aes_dround01(as_FloatRegister(i-2), F48, F50, F54);
  4362       __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
  4363       __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
  4364       if (i != 6 ) {
  4365         __ aes_dround23(as_FloatRegister(i-4), F54, F52, F50);
  4366         __ aes_dround01(as_FloatRegister(i-6), F54, F52, F48);
  4367         __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
  4368         __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
  4369       } else {
  4370         __ aes_dround23_l(as_FloatRegister(i-4), F54, F52, F50);
  4371         __ aes_dround01_l(as_FloatRegister(i-6), F54, F52, F48);
  4372         __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62);
  4373         __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60);
  4374       }
  4375     }
  4377     __ movxtod(L0,F54);
  4378     __ movxtod(L1,F52);
  4379     __ fxor(FloatRegisterImpl::D, F54, F48, F48);
  4380     __ fxor(FloatRegisterImpl::D, F52, F50, F50);
  4382     __ movxtod(G4,F56);
  4383     __ movxtod(G5,F58);
  4384     __ mov(L4,L0);
  4385     __ mov(L5,L1);
  4386     __ fxor(FloatRegisterImpl::D, F56, F60, F60);
  4387     __ fxor(FloatRegisterImpl::D, F58, F62, F62);
  4389     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
  4390     __ andcc(to, 7, G1);
  4391     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_next2_blocks192);
  4392     __ delayed()->edge8n(to, G0, G2);
  4394     // aligned case: store output into the destination array
  4395     __ stf(FloatRegisterImpl::D, F48, to, 0);
  4396     __ stf(FloatRegisterImpl::D, F50, to, 8);
  4397     __ stf(FloatRegisterImpl::D, F60, to, 16);
  4398     __ stf(FloatRegisterImpl::D, F62, to, 24);
  4399     __ ba_short(L_check_decrypt_loop_end192);
  4401     __ BIND(L_store_misaligned_output_next2_blocks192);
  4402     __ mov(8, G4);
  4403     __ sub(G4, G1, G4);
  4404     __ alignaddr(G4, G0, G4);
  4405     __ faligndata(F48, F50, F56); // F56 can be clobbered
  4406     __ faligndata(F50, F60, F50);
  4407     __ faligndata(F60, F62, F60);
  4408     __ faligndata(F62, F48, F48);
  4409     __ mov(to, G1);
  4410     __ and3(to, -8, to);
  4411     __ stpartialf(to, G2, F48, Assembler::ASI_PST8_PRIMARY);
  4412     __ stf(FloatRegisterImpl::D, F56, to, 8);
  4413     __ stf(FloatRegisterImpl::D, F50, to, 16);
  4414     __ stf(FloatRegisterImpl::D, F60, to, 24);
  4415     __ add(to, 32, to);
  4416     __ orn(G0, G2, G2);
  4417     __ stpartialf(to, G2, F48, Assembler::ASI_PST8_PRIMARY);
  4418     __ mov(G1, to);
  4420     __ BIND(L_check_decrypt_loop_end192);
  4421     __ add(from, 32, from);
  4422     __ add(to, 32, to);
  4423     __ subcc(len_reg, 32, len_reg);
  4424     __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks192);
  4425     __ delayed()->nop();
  4426     __ ba_short(L_cbcdec_end);
  4428     __ align(OptoLoopAlignment);
  4429     __ BIND(L_dec_next2_blocks256);
  4430     __ nop();
  4432     // check for 8-byte alignment since source byte array may have an arbitrary alignment if offset mod 8 is non-zero
  4433     __ andcc(from, 7, G0);
  4434     __ br(Assembler::notZero, true, Assembler::pn, L_load_misaligned_next2_blocks256);
  4435     __ delayed()->mov(from, G1); // save original 'from' address before alignaddr
  4437     // aligned case: load input into G4, G5, L4 and L5
  4438     __ ldx(from,0,G4);
  4439     __ ldx(from,8,G5);
  4440     __ ldx(from,16,L4);
  4441     __ ldx(from,24,L5);
  4442     __ ba_short(L_transform_next2_blocks256);
  4444     __ BIND(L_load_misaligned_next2_blocks256);
  4445     __ alignaddr(from, G0, from);
  4446     // F0, F2, F4, F60, F62 can be clobbered
  4447     __ ldf(FloatRegisterImpl::D, from, 0, F0);
  4448     __ ldf(FloatRegisterImpl::D, from, 8, F2);
  4449     __ ldf(FloatRegisterImpl::D, from, 16, F60);
  4450     __ ldf(FloatRegisterImpl::D, from, 24, F62);
  4451     __ ldf(FloatRegisterImpl::D, from, 32, F4);
  4452     __ faligndata(F0, F2, F0);
  4453     __ faligndata(F2, F60, F2);
  4454     __ faligndata(F60, F62, F60);
  4455     __ faligndata(F62, F4, F62);
  4456     __ movdtox(F0, G4);
  4457     __ movdtox(F2, G5);
  4458     __ movdtox(F60, L4);
  4459     __ movdtox(F62, L5);
  4460     __ mov(G1, from);
  4462     __ BIND(L_transform_next2_blocks256);
  4463     // F0:F2 used for first 16 bytes
  4464     __ xor3(L2,G4,G1);
  4465     __ movxtod(G1,F0);
  4466     __ xor3(L3,G5,G1);
  4467     __ movxtod(G1,F2);
  4469     // F60:F62 used for next 16 bytes
  4470     __ xor3(L2,L4,G1);
  4471     __ movxtod(G1,F60);
  4472     __ xor3(L3,L5,G1);
  4473     __ movxtod(G1,F62);
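           // With a 256-bit key the expanded schedule occupies F0..F54 (its last two 64-bit
           // words are kept in L2:L3), which overlaps the F0:F2/F4:F6 registers used for the
           // first data stream.  The first rounds below therefore consume F48..F54 right away;
           // those registers are then spilled to G-registers, reloaded with the first four
           // doublewords of the original key for the final (_l) rounds, and restored afterwards
           // for the next loop iteration.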
  4475     __ aes_dround23(F54, F0, F2, F4);
  4476     __ aes_dround01(F52, F0, F2, F6);
  4477     __ aes_dround23(F54, F60, F62, F58);
  4478     __ aes_dround01(F52, F60, F62, F56);
  4479     __ aes_dround23(F50, F6, F4, F2);
  4480     __ aes_dround01(F48, F6, F4, F0);
  4481     __ aes_dround23(F50, F56, F58, F62);
  4482     __ aes_dround01(F48, F56, F58, F60);
  4483     // save F48:F54 in temp registers
  4484     __ movdtox(F54,G2);
  4485     __ movdtox(F52,G3);
  4486     __ movdtox(F50,G6);
  4487     __ movdtox(F48,G1);
  4488     for ( int i = 46;  i >= 14; i -= 8 ) {
  4489       __ aes_dround23(as_FloatRegister(i), F0, F2, F4);
  4490       __ aes_dround01(as_FloatRegister(i-2), F0, F2, F6);
  4491       __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
  4492       __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
  4493       __ aes_dround23(as_FloatRegister(i-4), F6, F4, F2);
  4494       __ aes_dround01(as_FloatRegister(i-6), F6, F4, F0);
  4495       __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
  4496       __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
  4497     }
  4498     // init F48:F54 with F0:F6 values (original key)
  4499     __ ldf(FloatRegisterImpl::D, original_key, 0, F48);
  4500     __ ldf(FloatRegisterImpl::D, original_key, 8, F50);
  4501     __ ldf(FloatRegisterImpl::D, original_key, 16, F52);
  4502     __ ldf(FloatRegisterImpl::D, original_key, 24, F54);
  4503     __ aes_dround23(F54, F0, F2, F4);
  4504     __ aes_dround01(F52, F0, F2, F6);
  4505     __ aes_dround23(F54, F60, F62, F58);
  4506     __ aes_dround01(F52, F60, F62, F56);
  4507     __ aes_dround23_l(F50, F6, F4, F2);
  4508     __ aes_dround01_l(F48, F6, F4, F0);
  4509     __ aes_dround23_l(F50, F56, F58, F62);
  4510     __ aes_dround01_l(F48, F56, F58, F60);
  4511     // re-init F48:F54 with their original values
  4512     __ movxtod(G2,F54);
  4513     __ movxtod(G3,F52);
  4514     __ movxtod(G6,F50);
  4515     __ movxtod(G1,F48);
  4517     __ movxtod(L0,F6);
  4518     __ movxtod(L1,F4);
  4519     __ fxor(FloatRegisterImpl::D, F6, F0, F0);
  4520     __ fxor(FloatRegisterImpl::D, F4, F2, F2);
  4522     __ movxtod(G4,F56);
  4523     __ movxtod(G5,F58);
  4524     __ mov(L4,L0);
  4525     __ mov(L5,L1);
  4526     __ fxor(FloatRegisterImpl::D, F56, F60, F60);
  4527     __ fxor(FloatRegisterImpl::D, F58, F62, F62);
  4529     // check for 8-byte alignment since dest byte array may have arbitrary alignment if offset mod 8 is non-zero
  4530     __ andcc(to, 7, G1);
  4531     __ br(Assembler::notZero, true, Assembler::pn, L_store_misaligned_output_next2_blocks256);
  4532     __ delayed()->edge8n(to, G0, G2);
  4534     // aligned case: store output into the destination array
  4535     __ stf(FloatRegisterImpl::D, F0, to, 0);
  4536     __ stf(FloatRegisterImpl::D, F2, to, 8);
  4537     __ stf(FloatRegisterImpl::D, F60, to, 16);
  4538     __ stf(FloatRegisterImpl::D, F62, to, 24);
  4539     __ ba_short(L_check_decrypt_loop_end256);
  4541     __ BIND(L_store_misaligned_output_next2_blocks256);
  4542     __ mov(8, G4);
  4543     __ sub(G4, G1, G4);
  4544     __ alignaddr(G4, G0, G4);
  4545     __ faligndata(F0, F2, F56); // F56 can be clobbered
  4546     __ faligndata(F2, F60, F2);
  4547     __ faligndata(F60, F62, F60);
  4548     __ faligndata(F62, F0, F0);
  4549     __ mov(to, G1);
  4550     __ and3(to, -8, to);
  4551     __ stpartialf(to, G2, F0, Assembler::ASI_PST8_PRIMARY);
  4552     __ stf(FloatRegisterImpl::D, F56, to, 8);
  4553     __ stf(FloatRegisterImpl::D, F2, to, 16);
  4554     __ stf(FloatRegisterImpl::D, F60, to, 24);
  4555     __ add(to, 32, to);
  4556     __ orn(G0, G2, G2);
  4557     __ stpartialf(to, G2, F0, Assembler::ASI_PST8_PRIMARY);
  4558     __ mov(G1, to);
  4560     __ BIND(L_check_decrypt_loop_end256);
  4561     __ add(from, 32, from);
  4562     __ add(to, 32, to);
  4563     __ subcc(len_reg, 32, len_reg);
  4564     __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks256);
  4565     __ delayed()->nop();
  4567     __ BIND(L_cbcdec_end);
  4568     // re-init initial vector for next block, 8-byte alignment is guaranteed
  4569     __ stx(L0, rvec, 0);
  4570     __ stx(L1, rvec, 8);
  4571     __ mov(L7, I0);
  4572     __ ret();
  4573     __ delayed()->restore();
  4575     return start;
  4576   }
  4578   void generate_initial() {
  4579     // Generates all stubs and initializes the entry points
  4581     //------------------------------------------------------------------------------------------------------------------------
  4582     // entry points that exist in all platforms
  4583     // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
  4584     //       the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
  4585     StubRoutines::_forward_exception_entry                 = generate_forward_exception();
  4587     StubRoutines::_call_stub_entry                         = generate_call_stub(StubRoutines::_call_stub_return_address);
  4588     StubRoutines::_catch_exception_entry                   = generate_catch_exception();
  4590     //------------------------------------------------------------------------------------------------------------------------
  4591     // entry points that are platform specific
  4592     StubRoutines::Sparc::_test_stop_entry                  = generate_test_stop();
  4594     StubRoutines::Sparc::_stop_subroutine_entry            = generate_stop_subroutine();
  4595     StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows();
  4597 #if !defined(COMPILER2) && !defined(_LP64)
  4598     StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
  4599     StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
  4600     StubRoutines::_atomic_add_entry          = generate_atomic_add();
  4601     StubRoutines::_atomic_xchg_ptr_entry     = StubRoutines::_atomic_xchg_entry;
  4602     StubRoutines::_atomic_cmpxchg_ptr_entry  = StubRoutines::_atomic_cmpxchg_entry;
  4603     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
  4604     StubRoutines::_atomic_add_ptr_entry      = StubRoutines::_atomic_add_entry;
  4605 #endif  // COMPILER2 !=> _LP64
  4607     // Build this early so it's available for the interpreter.
  4608     StubRoutines::_throw_StackOverflowError_entry          = generate_throw_exception("StackOverflowError throw_exception",           CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
  4609   }
  4612   void generate_all() {
  4613     // Generates all stubs and initializes the entry points
  4615     // Generate partial_subtype_check first here since its code depends on
  4616     // UseZeroBaseCompressedOops which is defined after heap initialization.
  4617     StubRoutines::Sparc::_partial_subtype_check                = generate_partial_subtype_check();
  4618     // These entry points require SharedInfo::stack0 to be set up in non-core builds
  4619     StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
  4620     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
  4621     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
  4623     StubRoutines::_handler_for_unsafe_access_entry =
  4624       generate_handler_for_unsafe_access();
  4626     // support for verify_oop (must happen after universe_init)
  4627     StubRoutines::_verify_oop_subroutine_entry     = generate_verify_oop_subroutine();
  4629     // arraycopy stubs used by compilers
  4630     generate_arraycopy_stubs();
  4632     // Don't initialize the platform math functions since sparc
  4633     // doesn't have intrinsics for these operations.
  4635     // Safefetch stubs.
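           // SafeFetch32/SafeFetchN load from an address that may be unmapped; if the load
           // faults, the signal handler recognizes the recorded fault pc and resumes at the
           // continuation pc, where the caller-supplied default value is returned instead.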
  4636     generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
  4637                                                        &StubRoutines::_safefetch32_fault_pc,
  4638                                                        &StubRoutines::_safefetch32_continuation_pc);
  4639     generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
  4640                                                        &StubRoutines::_safefetchN_fault_pc,
  4641                                                        &StubRoutines::_safefetchN_continuation_pc);
  4643     // generate AES intrinsics code
  4644     if (UseAESIntrinsics) {
  4645       StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
  4646       StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
  4647       StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
  4648       StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
  4649     }
  4650   }
  4653  public:
  4654   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
  4655     // replace the standard masm with a special one:
  4656     _masm = new MacroAssembler(code);
  4658     _stub_count = !all ? 0x100 : 0x200;
  4659     if (all) {
  4660       generate_all();
  4661     } else {
  4662       generate_initial();
  4663     }
  4665     // make sure this stub is available for all local calls
  4666     if (_atomic_add_stub.is_unbound()) {
  4667       // generate a second time, if necessary
  4668       (void) generate_atomic_add();
  4669     }
  4670   }
  4673  private:
  4674   int _stub_count;
  4675   void stub_prolog(StubCodeDesc* cdesc) {
  4676     # ifdef ASSERT
  4677       // put extra information in the stub code, to make it more readable
  4678 #ifdef _LP64
  4679 // Write the high part of the address
  4680 // [RGV] Check if there is a dependency on the size of this prolog
  4681       __ emit_data((intptr_t)cdesc >> 32,    relocInfo::none);
  4682 #endif
  4683       __ emit_data((intptr_t)cdesc,    relocInfo::none);
  4684       __ emit_data(++_stub_count, relocInfo::none);
  4685     # endif
  4686     align(true);
  4687   }
  4689   void align(bool at_header = false) {
  4690     // %%%%% move this constant somewhere else
  4691     // UltraSPARC cache line size is 8 instructions:
  4692     const unsigned int icache_line_size = 32;
  4693     const unsigned int icache_half_line_size = 16;
  4695     if (at_header) {
  4696       while ((intptr_t)(__ pc()) % icache_line_size != 0) {
  4697         __ emit_data(0, relocInfo::none);
  4698       }
  4699     } else {
  4700       while ((intptr_t)(__ pc()) % icache_half_line_size != 0) {
  4701         __ nop();
  4702       }
  4703     }
  4704   }
  4706 }; // end class declaration
  4708 void StubGenerator_generate(CodeBuffer* code, bool all) {
  4709   StubGenerator g(code, all);
  4710 }
