src/cpu/sparc/vm/stubGenerator_sparc.cpp

author:      morris
date:        Fri, 07 Jun 2013 16:46:37 -0700
changeset:   5283 46c544b8fbfc
parent:      4325 d2f8c38e543d
child:       5400 980532a806a5
permissions: -rw-r--r--

8008407: remove SPARC V8 support
Summary: Removed most of the SPARC V8 instructions
Reviewed-by: kvn, twisti
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/top.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp.

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Note:  The register L7 is used as L7_thread_cache, and may not be used
//        any other way within this module.

static const Register& Lstub_temp = L2;

// -------------------------------------------------------------------------------------------------------------------------
// Stub Code definitions

static address handle_unsafe_access() {
  JavaThread* thread = JavaThread::current();
  address pc  = thread->saved_exception_pc();
  address npc = thread->saved_exception_npc();
  // pc is the instruction which we must emulate
  // doing a no-op is fine:  return garbage from the load

  // request an async exception
  thread->set_pending_unsafe_access_error();

  // return address of next instruction to execute
  return npc;
}
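
// SPARC maintains a pc/npc pair, so the "next instruction" is not always
// pc + 4 (the faulting load may sit in a branch delay slot). Returning npc
// therefore skips exactly the trapping instruction, and the garbage the load
// left behind is harmless because the async error is already pending.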
class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(a,b,c) (0)
#else
#define inc_counter_np(counter, t1, t2) \
  BLOCK_COMMENT("inc_counter " #counter); \
  __ inc_counter(&counter, t1, t2);
#endif

  //----------------------------------------------------------------------------------------------------
  // Call stubs are used to call Java from C

  address generate_call_stub(address& return_pc) {
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // Incoming arguments:
    //
    // o0         : call wrapper address
    // o1         : result (address)
    // o2         : result type
    // o3         : method
    // o4         : (interpreter) entry point
    // o5         : parameters (address)
    // [sp + 0x5c]: parameter size (in words)
    // [sp + 0x60]: thread
    //
    // +---------------+ <--- sp + 0
    // |               |
    // . reg save area .
    // |               |
    // +---------------+ <--- sp + 0x40
    // |               |
    // . extra 7 slots .
    // |               |
    // +---------------+ <--- sp + 0x5c
    // |  param. size  |
    // +---------------+ <--- sp + 0x60
    // |    thread     |
    // +---------------+
    // |               |

    // note: if the link argument position changes, adjust
    //       the code in frame::entry_frame_call_wrapper()

    const Argument link           = Argument(0, false); // used only for GC
    const Argument result         = Argument(1, false);
    const Argument result_type    = Argument(2, false);
    const Argument method         = Argument(3, false);
    const Argument entry_point    = Argument(4, false);
    const Argument parameters     = Argument(5, false);
    const Argument parameter_size = Argument(6, false);
    const Argument thread         = Argument(7, false);

    // setup thread register
    __ ld_ptr(thread.as_address(), G2_thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    { const Register t = G3_scratch;
      Label L;
      __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), t);
      __ br_null_short(t, Assembler::pt, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // create activation frame & allocate space for parameters
    { const Register t = G3_scratch;
      __ ld_ptr(parameter_size.as_address(), t);                // get parameter size (in words)
      __ add(t, frame::memory_parameter_word_sp_offset, t);     // add space for save area (in words)
      __ round_to(t, WordsPerLong);                             // make sure it is multiple of 2 (in words)
      __ sll(t, Interpreter::logStackElementSize, t);           // compute number of bytes
      __ neg(t);                                                // negate so it can be used with save
      __ save(SP, t, SP);                                       // setup new frame
    }

    // +---------------+ <--- sp + 0
    // |               |
    // . reg save area .
    // |               |
    // +---------------+ <--- sp + 0x40
    // |               |
    // . extra 7 slots .
    // |               |
    // +---------------+ <--- sp + 0x5c
    // |  empty slot   |      (only if parameter size is even)
    // +---------------+
    // |               |
    // .  parameters   .
    // |               |
    // +---------------+ <--- fp + 0
    // |               |
    // . reg save area .
    // |               |
    // +---------------+ <--- fp + 0x40
    // |               |
    // . extra 7 slots .
    // |               |
    // +---------------+ <--- fp + 0x5c
    // |  param. size  |
    // +---------------+ <--- fp + 0x60
    // |    thread     |
    // +---------------+
    // |               |

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    { const Register src = parameters.as_in().as_register();
      const Register dst = Lentry_args;
      const Register tmp = G3_scratch;
      const Register cnt = G4_scratch;

      // test if any parameters & setup of Lentry_args
      Label exit;
      __ ld_ptr(parameter_size.as_in().as_address(), cnt);      // parameter counter
      __ add( FP, STACK_BIAS, dst );
      __ cmp_zero_and_br(Assembler::zero, cnt, exit);
      __ delayed()->sub(dst, BytesPerWord, dst);                 // setup Lentry_args

      // copy parameters if any
      Label loop;
      __ BIND(loop);
      // Store parameter value
      __ ld_ptr(src, 0, tmp);
      __ add(src, BytesPerWord, src);
      __ st_ptr(tmp, dst, 0);
      __ deccc(cnt);
      __ br(Assembler::greater, false, Assembler::pt, loop);
      __ delayed()->sub(dst, Interpreter::stackElementSize, dst);

      // done
      __ BIND(exit);
    }

    // setup parameters, method & call Java function
#ifdef ASSERT
    // layout_activation_impl checks its notion of saved SP against
    // this register, so if this changes update it as well.
    const Register saved_SP = Lscratch;
    __ mov(SP, saved_SP);                               // keep track of SP before call
#endif

    // setup parameters
    const Register t = G3_scratch;
    __ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words)
    __ sll(t, Interpreter::logStackElementSize, t);    // compute number of bytes
    __ sub(FP, t, Gargs);                              // setup parameter pointer
#ifdef _LP64
    __ add( Gargs, STACK_BIAS, Gargs );                // Account for LP64 stack bias
#endif
    __ mov(SP, O5_savedSP);

    // do the call
    //
    // the following registers must be set up:
    //
    // G2_thread
    // G5_method
    // Gargs
    BLOCK_COMMENT("call Java function");
    __ jmpl(entry_point.as_in().as_register(), G0, O7);
    __ delayed()->mov(method.as_in().as_register(), G5_method);   // setup method

    BLOCK_COMMENT("call_stub_return_address:");
    return_pc = __ pc();

    // The callee, if it wasn't interpreted, can return with SP changed, so
    // we can no longer assert on the change of SP here.

    // store result depending on type
    // (everything that is not T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE
    //  is treated as T_INT)
    { const Register addr = result     .as_in().as_register();
      const Register type = result_type.as_in().as_register();
      Label is_long, is_float, is_double, is_object, exit;
      __            cmp(type, T_OBJECT);  __ br(Assembler::equal, false, Assembler::pn, is_object);
      __ delayed()->cmp(type, T_FLOAT);   __ br(Assembler::equal, false, Assembler::pn, is_float);
      __ delayed()->cmp(type, T_DOUBLE);  __ br(Assembler::equal, false, Assembler::pn, is_double);
      __ delayed()->cmp(type, T_LONG);    __ br(Assembler::equal, false, Assembler::pn, is_long);
      __ delayed()->nop();

      // store int result
      __ st(O0, addr, G0);

      __ BIND(exit);
      __ ret();
      __ delayed()->restore();

      __ BIND(is_object);
      __ ba(exit);
      __ delayed()->st_ptr(O0, addr, G0);

      __ BIND(is_float);
      __ ba(exit);
      __ delayed()->stf(FloatRegisterImpl::S, F0, addr, G0);

      __ BIND(is_double);
      __ ba(exit);
      __ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0);

      __ BIND(is_long);
#ifdef _LP64
      __ ba(exit);
      __ delayed()->st_long(O0, addr, G0);      // store entire long
#else
#if defined(COMPILER2)
  // All return values are where we want them, except for Longs.  C2 returns
  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
  // Since the interpreter returns longs in both G1 and O0/O1 in the 32-bit
  // build, we simply always use G1.
  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
  // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
  // first which would move g1 -> O0/O1 and destroy the exception we were throwing.

      __ ba(exit);
      __ delayed()->stx(G1, addr, G0);  // store entire long
#else
      __ st(O1, addr, BytesPerInt);
      __ ba(exit);
      __ delayed()->st(O0, addr, G0);
#endif /* COMPILER2 */
#endif /* _LP64 */
     }
     return start;
  }
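
  // For reference, the result-storing tail above behaves like this C++
  // sketch (illustrative only; 'object_result' etc. stand for the values the
  // callee left in O0/O1/F0 and are not HotSpot names):
  //
  //   switch (type) {
  //     case T_OBJECT: *(oop*)     addr = object_result; break;
  //     case T_FLOAT:  *(jfloat*)  addr = float_result;  break;
  //     case T_DOUBLE: *(jdouble*) addr = double_result; break;
  //     case T_LONG:   *(jlong*)   addr = long_result;   break;
  //     default:       *(jint*)    addr = int_result;    break; // everything else
  //   }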
  //----------------------------------------------------------------------------------------------------
  // Return point for a Java call if there's an exception thrown in Java code.
  // The exception is caught and transformed into a pending exception stored in
  // JavaThread that can be tested from within the VM.
  //
  // Oexception: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");

    address start = __ pc();
    // verify that thread corresponds
    __ verify_thread();

    const Register& temp_reg = Gtemp;
    Address pending_exception_addr    (G2_thread, Thread::pending_exception_offset());
    Address exception_file_offset_addr(G2_thread, Thread::exception_file_offset   ());
    Address exception_line_offset_addr(G2_thread, Thread::exception_line_offset   ());

    // set pending exception
    __ verify_oop(Oexception);
    __ st_ptr(Oexception, pending_exception_addr);
    __ set((intptr_t)__FILE__, temp_reg);
    __ st_ptr(temp_reg, exception_file_offset_addr);
    __ set((intptr_t)__LINE__, temp_reg);
    __ st(temp_reg, exception_line_offset_addr);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");

    AddressLiteral stub_ret(StubRoutines::_call_stub_return_address);
    __ jump_to(stub_ret, temp_reg);
    __ delayed()->nop();

    return start;
  }
  //----------------------------------------------------------------------------------------------------
  // Continuation point for runtime calls returning with a pending exception.
  // The pending exception check happened in the runtime or native call stub.
  // The pending exception in Thread is converted into a Java-level exception.
  //
  // Contract with Java-level exception handler: O0 = exception
  //                                             O1 = throwing pc

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward_exception");
    address start = __ pc();

    // Upon entry, O7 has the return address returning into Java
    // (interpreted or compiled) code; i.e. the return address
    // becomes the throwing pc.

    const Register& handler_reg = Gtemp;

    Address exception_addr(G2_thread, Thread::pending_exception_offset());

#ifdef ASSERT
    // make sure that this code is only executed if there is a pending exception
    { Label L;
      __ ld_ptr(exception_addr, Gtemp);
      __ br_notnull_short(Gtemp, Assembler::pt, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into handler_reg
    __ get_thread();
    __ ld_ptr(exception_addr, Oexception);
    __ verify_oop(Oexception);
    __ save_frame(0);             // compensates for compiler weakness
    __ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), G2_thread, Lscratch);
    __ mov(O0, handler_reg);
    __ restore();                 // compensates for compiler weakness

    __ ld_ptr(exception_addr, Oexception);
    __ add(O7, frame::pc_return_offset, Oissuing_pc); // save the issuing PC

#ifdef ASSERT
    // make sure exception is set
    { Label L;
      __ br_notnull_short(Oexception, Assembler::pt, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif
    // jump to exception handler
    __ jmp(handler_reg, 0);
    // clear pending exception
    __ delayed()->st_ptr(G0, exception_addr);

    return start;
  }
  //------------------------------------------------------------------------------------------------------------------------
  // Continuation point for throwing of implicit exceptions that are not handled in
  // the current activation. Fabricates an exception oop and initiates normal
  // exception dispatching in this frame. Only callee-saved registers are preserved
  // (through the normal register window / RegisterMap handling).
  // If the compiler needs all registers to be preserved between the fault
  // point and the exception handler then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other implicit
  // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
  // either at call sites or otherwise assume that stack unwinding will be initiated,
  // so caller saved registers were assumed volatile in the compiler.

  // Note that we generate only this stub into a RuntimeStub, because it needs to be
  // properly traversed and ignored during GC, so we change the meaning of the "__"
  // macro within this method.
#undef __
#define __ masm->

  address generate_throw_exception(const char* name, address runtime_entry,
                                   Register arg1 = noreg, Register arg2 = noreg) {
#ifdef ASSERT
    int insts_size = VerifyThread ? 1 * K : 600;
#else
    int insts_size = VerifyThread ? 1 * K : 256;
#endif /* ASSERT */
    int locs_size  = 32;

    CodeBuffer      code(name, insts_size, locs_size);
    MacroAssembler* masm = new MacroAssembler(&code);

    __ verify_thread();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of thread-local storage
    __ assert_not_delayed();

    // Note that we always push a frame because on the SPARC
    // architecture, for all of our implicit exception kinds at call
    // sites, the implicit exception is taken before the callee frame
    // is pushed.
    __ save_frame(0);

    int frame_complete = __ offset();

    // Note that we always have a runtime stub frame on the top of stack by this point
    Register last_java_sp = SP;
    // 64-bit last_java_sp is biased!
    __ set_last_Java_frame(last_java_sp, G0);
    if (VerifyThread)  __ mov(G2_thread, O0); // about to be smashed; pass early
    __ save_thread(noreg);
    if (arg1 != noreg) {
      assert(arg2 != O1, "clobbered");
      __ mov(arg1, O1);
    }
    if (arg2 != noreg) {
      __ mov(arg2, O2);
    }
    // do the call
    BLOCK_COMMENT("call runtime_entry");
    __ call(runtime_entry, relocInfo::runtime_call_type);
    if (!VerifyThread)
      __ delayed()->mov(G2_thread, O0);  // pass thread as first argument
    else
      __ delayed()->nop();             // (thread already passed)
    __ restore_thread(noreg);
    __ reset_last_Java_frame();

    // check for pending exceptions. use Gtemp as scratch register.
#ifdef ASSERT
    Label L;

    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    Register scratch_reg = Gtemp;
    __ ld_ptr(exception_addr, scratch_reg);
    __ br_notnull_short(scratch_reg, Assembler::pt, L);
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    BLOCK_COMMENT("call forward_exception_entry");
    __ call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
    // we use O7 linkage so that forward_exception_entry has the issuing PC
    __ delayed()->restore();

    RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, masm->total_frame_size_in_bytes(0), NULL, false);
    return stub->entry_point();
  }

#undef __
#define __ _masm->
  // Generate a routine that sets all the registers so we
  // can tell if the stop routine prints them correctly.
  address generate_test_stop() {
    StubCodeMark mark(this, "StubRoutines", "test_stop");
    address start = __ pc();

    int i;

    __ save_frame(0);

    static jfloat zero = 0.0, one = 1.0;

    // put addr in L0, then load through L0 to F0
    __ set((intptr_t)&zero, L0);  __ ldf( FloatRegisterImpl::S, L0, 0, F0);
    __ set((intptr_t)&one,  L0);  __ ldf( FloatRegisterImpl::S, L0, 0, F1); // 1.0 to F1

    // use add to put 2..18 in F2..F18
    for ( i = 2;  i <= 18;  ++i ) {
      __ fadd( FloatRegisterImpl::S, F1, as_FloatRegister(i-1),  as_FloatRegister(i));
    }

    // Now put double 2 in F16, double 18 in F18
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F2, F16 );
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F18, F18 );

    // use add to put 20..32 in F20..F32
    for (i = 20; i < 32; i += 2) {
      __ fadd( FloatRegisterImpl::D, F16, as_FloatRegister(i-2),  as_FloatRegister(i));
    }

    // put 0..7 in i's, 8..15 in l's, 16..23 in o's, 24..31 in g's
    for ( i = 0; i < 8; ++i ) {
      if (i < 6) {
        __ set(     i, as_iRegister(i));
        __ set(16 + i, as_oRegister(i));
        __ set(24 + i, as_gRegister(i));
      }
      __ set( 8 + i, as_lRegister(i));
    }

    __ stop("testing stop");

    __ ret();
    __ delayed()->restore();

    return start;
  }
  address generate_stop_subroutine() {
    StubCodeMark mark(this, "StubRoutines", "stop_subroutine");
    address start = __ pc();

    __ stop_subroutine();

    return start;
  }

  address generate_flush_callers_register_windows() {
    StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
    address start = __ pc();

    __ flushw();
    __ retl(false);
    __ delayed()->add( FP, STACK_BIAS, O0 );
    // The returned value must be a stack pointer whose register save area
    // is flushed, and will stay flushed while the caller executes.

    return start;
  }
  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
  //
  // Arguments:
  //
  //      exchange_value: O0
  //      dest:           O1
  //
  // Results:
  //
  //     O0: the value previously stored in dest
  //
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    if (UseCASForSwap) {
      // Use CAS instead of swap, just in case the MP hardware
      // prefers to work with just one kind of synch. instruction.
      Label retry;
      __ BIND(retry);
      __ mov(O0, O3);       // scratch copy of exchange value
      __ ld(O1, 0, O2);     // observe the previous value
      // try to replace O2 with O3
      __ cas(O1, O2, O3);
      __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);

      __ retl(false);
      __ delayed()->mov(O2, O0);  // report previous value to caller
    } else {
      __ retl(false);
      __ delayed()->swap(O1, 0, O0);
    }

    return start;
  }
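
  // The UseCASForSwap path above is the classic compare-and-swap loop; a
  // minimal C++ sketch of its semantics (cas32() is an illustrative stand-in
  // for the 'cas' instruction, not a HotSpot function):
  //
  //   jint atomic_xchg_sketch(jint exchange_value, volatile jint* dest) {
  //     jint observed;
  //     do {
  //       observed = *dest;                       // ld: observe previous value
  //     } while (cas32(dest, observed, exchange_value) != observed); // retry if it moved
  //     return observed;                          // previous value
  //   }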
  // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
  //
  // Arguments:
  //
  //      exchange_value: O0
  //      dest:           O1
  //      compare_value:  O2
  //
  // Results:
  //
  //     O0: the value previously stored in dest
  //
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    // cmpxchg(dest, compare_value, exchange_value)
    __ cas(O1, O2, O0);
    __ retl(false);
    __ delayed()->nop();

    return start;
  }
  // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
  //
  // Arguments:
  //
  //      exchange_value: O1:O0
  //      dest:           O2
  //      compare_value:  O4:O3
  //
  // Results:
  //
  //     O1:O0: the value previously stored in dest
  //
  // Overwrites: G1,G2,G3
  //
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ sllx(O0, 32, O0);
    __ srl(O1, 0, O1);
    __ or3(O0,O1,O0);      // O0 holds 64-bit value from exchange_value
    __ sllx(O3, 32, O3);
    __ srl(O4, 0, O4);
    __ or3(O3,O4,O3);      // O3 holds 64-bit value from compare_value
    __ casx(O2, O3, O0);
    __ srl(O0, 0, O1);     // unpacked return value in O1:O0
    __ retl(false);
    __ delayed()->srlx(O0, 32, O0);

    return start;
  }
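
  // The shifts above just reassemble each 64-bit argument from its 32-bit
  // register pair (high word in the first register of the pair); a sketch of
  // the arithmetic:
  //
  //   jlong pack64(jint hi, jint lo) {
  //     return ((jlong)hi << 32) | ((jlong)lo & 0xffffffffL);  // sllx + srl + or3
  //   }
  //
  // After the casx, srl/srlx perform the inverse split of the old value back
  // into the O1:O0 pair for the 32-bit caller.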
  // Support for jint Atomic::add(jint add_value, volatile jint* dest).
  //
  // Arguments:
  //
  //      add_value: O0   (e.g., +1 or -1)
  //      dest:      O1
  //
  // Results:
  //
  //     O0: the new value stored in dest
  //
  // Overwrites: O3
  //
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();
    __ BIND(_atomic_add_stub);

    Label retry;
    __ BIND(retry);

    __ lduw(O1, 0, O2);
    __ add(O0, O2, O3);
    __ cas(O1, O2, O3);
    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
    __ retl(false);
    __ delayed()->add(O0, O2, O0); // note that cas made O2==O3

    return start;
  }
  Label _atomic_add_stub;  // called from other stubs
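
  // A minimal C++ sketch of the loop above (cas32() is an illustrative
  // stand-in for the 'cas' instruction):
  //
  //   jint atomic_add_sketch(jint add_value, volatile jint* dest) {
  //     for (;;) {
  //       jint observed = *dest;                          // lduw
  //       if (cas32(dest, observed, observed + add_value) == observed)
  //         return observed + add_value;                  // the new value
  //     }
  //   }
  //
  // The delay-slot add works because a successful cas leaves O2 == O3.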
  //------------------------------------------------------------------------------------------------------------------------
  // The following routine generates a subroutine to throw an asynchronous
  // UnknownError when an unsafe access gets a fault that could not be
  // reasonably prevented by the programmer.  (Example: SIGBUS/OBJERR.)
  //
  // Arguments :
  //
  //      trapping PC:    O7
  //
  // Results:
  //     posts an asynchronous exception, skips the trapping instruction
  //

  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    address start = __ pc();

    const int preserve_register_words = (64 * 2);
    Address preserve_addr(FP, (-preserve_register_words * wordSize) + STACK_BIAS);

    Register Lthread = L7_thread_cache;
    int i;

    __ save_frame(0);
    __ mov(G1, L1);
    __ mov(G2, L2);
    __ mov(G3, L3);
    __ mov(G4, L4);
    __ mov(G5, L5);
    for (i = 0; i < 64; i += 2) {
      __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
    }

    address entry_point = CAST_FROM_FN_PTR(address, handle_unsafe_access);
    BLOCK_COMMENT("call handle_unsafe_access");
    __ call(entry_point, relocInfo::runtime_call_type);
    __ delayed()->nop();

    __ mov(L1, G1);
    __ mov(L2, G2);
    __ mov(L3, G3);
    __ mov(L4, G4);
    __ mov(L5, G5);
    for (i = 0; i < 64; i += 2) {
      __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
    }

    __ verify_thread();

    __ jmp(O0, 0);
    __ delayed()->restore();

    return start;
  }
  // Support for uint StubRoutine::Sparc::partial_subtype_check( Klass sub, Klass super );
  // Arguments :
  //
  //      ret  : O0, returned
  //      icc/xcc: set as O0 (depending on wordSize)
  //      sub  : O1, argument, not changed
  //      super: O2, argument, not changed
  //      raddr: O7, blown by call
  address generate_partial_subtype_check() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
    address start = __ pc();
    Label miss;

#if defined(COMPILER2) && !defined(_LP64)
    // Do not use a 'save' because it blows the 64-bit O registers.
    __ add(SP,-4*wordSize,SP);  // Make space for 4 temps (stack must be 2 words aligned)
    __ st_ptr(L0,SP,(frame::register_save_words+0)*wordSize);
    __ st_ptr(L1,SP,(frame::register_save_words+1)*wordSize);
    __ st_ptr(L2,SP,(frame::register_save_words+2)*wordSize);
    __ st_ptr(L3,SP,(frame::register_save_words+3)*wordSize);
    Register Rret   = O0;
    Register Rsub   = O1;
    Register Rsuper = O2;
#else
    __ save_frame(0);
    Register Rret   = I0;
    Register Rsub   = I1;
    Register Rsuper = I2;
#endif

    Register L0_ary_len = L0;
    Register L1_ary_ptr = L1;
    Register L2_super   = L2;
    Register L3_index   = L3;

    __ check_klass_subtype_slow_path(Rsub, Rsuper,
                                     L0, L1, L2, L3,
                                     NULL, &miss);

    // Match falls through here.
    __ addcc(G0,0,Rret);        // set Z flags, Z result

#if defined(COMPILER2) && !defined(_LP64)
    __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
    __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
    __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
    __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
    __ retl();                  // Result in Rret is zero; flags set to Z
    __ delayed()->add(SP,4*wordSize,SP);
#else
    __ ret();                   // Result in Rret is zero; flags set to Z
    __ delayed()->restore();
#endif

    __ BIND(miss);
    __ addcc(G0,1,Rret);        // set NZ flags, NZ result

#if defined(COMPILER2) && !defined(_LP64)
    __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
    __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
    __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
    __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
    __ retl();                  // Result in Rret is != 0; flags set to NZ
    __ delayed()->add(SP,4*wordSize,SP);
#else
    __ ret();                   // Result in Rret is != 0; flags set to NZ
    __ delayed()->restore();
#endif

    return start;
  }
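
  // check_klass_subtype_slow_path() above amounts to a linear scan of the
  // subklass's secondary supers array; a hedged C-style sketch of the idea
  // (field names are simplified, not the exact HotSpot layout):
  //
  //   bool partial_subtype_check_sketch(Klass* sub, Klass* super) {
  //     for (int i = 0; i < sub->secondary_supers->length; i++) {
  //       if (sub->secondary_supers->at(i) == super) {
  //         sub->secondary_super_cache = super;  // remember the hit
  //         return true;                         // match: Rret == 0, Z set
  //       }
  //     }
  //     return false;                            // miss: Rret != 0, NZ set
  //   }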
  // Called from MacroAssembler::verify_oop
  //
  address generate_verify_oop_subroutine() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop_stub");

    address start = __ pc();

    __ verify_oop_subroutine();

    return start;
  }

  //
  // Verify that a register contains a clean 32-bit value
  // (high 32 bits are 0) so it can be used in 64-bit shifts (sllx, srax).
  //
  //  Input:
  //    Rint  -  32-bit value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#if defined(ASSERT) && defined(_LP64)
    __ signx(Rint, Rtmp);
    __ cmp(Rint, Rtmp);
    __ breakpoint_trap(Assembler::notEqual, Assembler::xcc);
#endif
  }
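
  // For example, a register holding 0x000000007fffffff passes the check (it
  // equals the sign extension of its own low word), while 0x0000000100000000
  // trips the breakpoint trap: 'signx' writes the sign extension of the low
  // 32 bits into Rtmp, and any mismatch means the high half was not clean.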
  //
  //  Generate overlap test for array copy stubs
  //
  //  Input:
  //    O0    -  array1
  //    O1    -  array2
  //    O2    -  element count
  //
  //  Kills temps:  O3, O4
  //
  void array_overlap_test(address no_overlap_target, int log2_elem_size) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, log2_elem_size);
  }
  void array_overlap_test(Label& L_no_overlap, int log2_elem_size) {
    array_overlap_test(NULL, &L_no_overlap, log2_elem_size);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, int log2_elem_size) {
    const Register from       = O0;
    const Register to         = O1;
    const Register count      = O2;
    const Register to_from    = O3; // to - from
    const Register byte_count = O4; // count << log2_elem_size

      __ subcc(to, from, to_from);
      __ sll_ptr(count, log2_elem_size, byte_count);
      if (NOLp == NULL)
        __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, no_overlap_target);
      else
        __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp));
      __ delayed()->cmp(to_from, byte_count);
      if (NOLp == NULL)
        __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, no_overlap_target);
      else
        __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, (*NOLp));
      __ delayed()->nop();
  }
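
  // The two unsigned branches above implement the usual disjointness test;
  // as a C sketch of the condition being checked (illustrative only):
  //
  //   bool no_overlap(char* from, char* to, size_t byte_count) {
  //     return to <= from                          // forward copy is safe
  //         || (size_t)(to - from) >= byte_count;  // regions don't intersect
  //   }
  //
  // A forward copy only breaks when 'to' lands strictly inside
  // [from, from + byte_count), which is exactly the case both branches miss.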
  //
  //  Generate pre-write barrier for array.
  //
  //  Input:
  //     addr     - register containing starting address
  //     count    - register containing element count
  //     tmp      - scratch register
  //
  //  The input registers are overwritten.
  //
  void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized
        if (!dest_uninitialized) {
          __ save_frame(0);
          // Save the necessary global regs... will be used after.
          if (addr->is_global()) {
            __ mov(addr, L0);
          }
          if (count->is_global()) {
            __ mov(count, L1);
          }
          __ mov(addr->after_save(), O0);
          // Get the count into O1
          __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
          __ delayed()->mov(count->after_save(), O1);
          if (addr->is_global()) {
            __ mov(L0, addr);
          }
          if (count->is_global()) {
            __ mov(L1, count);
          }
          __ restore();
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();
    }
  }
  //
  //  Generate post-write barrier for array.
  //
  //  Input:
  //     addr     - register containing starting address
  //     count    - register containing element count
  //     tmp      - scratch register
  //
  //  The input registers are overwritten.
  //
  void gen_write_ref_array_post_barrier(Register addr, Register count,
                                        Register tmp) {
    BarrierSet* bs = Universe::heap()->barrier_set();

    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        {
          // Get some new fresh output registers.
          __ save_frame(0);
          __ mov(addr->after_save(), O0);
          __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post));
          __ delayed()->mov(count->after_save(), O1);
          __ restore();
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
        {
          CardTableModRefBS* ct = (CardTableModRefBS*)bs;
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
          assert_different_registers(addr, count, tmp);

          Label L_loop;

          __ sll_ptr(count, LogBytesPerHeapOop, count);
          __ sub(count, BytesPerHeapOop, count);
          __ add(count, addr, count);
          // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
          __ srl_ptr(addr, CardTableModRefBS::card_shift, addr);
          __ srl_ptr(count, CardTableModRefBS::card_shift, count);
          __ sub(count, addr, count);
          AddressLiteral rs(ct->byte_map_base);
          __ set(rs, tmp);
        __ BIND(L_loop);
          __ stb(G0, tmp, addr);
          __ subcc(count, 1, count);
          __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
          __ delayed()->add(addr, 1, addr);
        }
        break;
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();
    }
  }
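
  // For the card-table cases, the loop above is equivalent to dirtying every
  // card spanned by the stored oop range; a hedged C sketch (the dirty value
  // 0 matches the stb(G0, ...) above, other names are illustrative):
  //
  //   void post_barrier_sketch(uintptr_t addr, size_t count, jbyte* byte_map_base) {
  //     uintptr_t last = addr + count * BytesPerHeapOop - BytesPerHeapOop;
  //     for (uintptr_t card = addr >> card_shift; card <= (last >> card_shift); card++)
  //       byte_map_base[card] = 0;   // 0 == dirty
  //   }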
  //
  // Generate main code for disjoint arraycopy
  //
  typedef void (StubGenerator::*CopyLoopFunc)(Register from, Register to, Register count, int count_dec,
                                              Label& L_loop, bool use_prefetch, bool use_bis);

  void disjoint_copy_core(Register from, Register to, Register count, int log2_elem_size,
                          int iter_size, CopyLoopFunc copy_loop_func) {
    Label L_copy;

    assert(log2_elem_size <= 3, "the following code should be changed");
    int count_dec = 16>>log2_elem_size;

    int prefetch_dist = MAX2(ArraycopySrcPrefetchDistance, ArraycopyDstPrefetchDistance);
    assert(prefetch_dist < 4096, "invalid value");
    prefetch_dist = (prefetch_dist + (iter_size-1)) & (-iter_size); // round up to one iteration copy size
    int prefetch_count = (prefetch_dist >> log2_elem_size); // elements count
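
    // Worked example of the round-up above: with iter_size == 16 and a
    // prefetch distance of 57 bytes, (57 + 15) & -16 == 64, i.e. the next
    // multiple of one loop iteration's copy size; prefetch_count is then
    // that same distance expressed in elements.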
    if (UseBlockCopy) {
      Label L_block_copy, L_block_copy_prefetch, L_skip_block_copy;

      // 64 bytes tail + bytes copied in one loop iteration
      int tail_size = 64 + iter_size;
      int block_copy_count = (MAX2(tail_size, (int)BlockCopyLowLimit)) >> log2_elem_size;
      // Use BIS copy only for big arrays since it requires membar.
      __ set(block_copy_count, O4);
      __ cmp_and_br_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_skip_block_copy);
      // This code is for disjoint source and destination:
      //   to <= from || to >= from+count
      // but BIS will stomp over 'from' if (to > from-tail_size && to <= from)
      __ sub(from, to, O4);
      __ srax(O4, 4, O4); // divide by 16 since the following short branches have only 5 bits for imm.
      __ cmp_and_br_short(O4, (tail_size>>4), Assembler::lessEqualUnsigned, Assembler::pn, L_skip_block_copy);

      __ wrasi(G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
      // BIS should not be used to copy tail (64 bytes+iter_size)
      // to avoid zeroing of following values.
      __ sub(count, (tail_size>>log2_elem_size), count); // count is still positive >= 0

      if (prefetch_count > 0) { // rounded up to one iteration count
        // Do prefetching only if copy size is bigger
        // than prefetch distance.
        __ set(prefetch_count, O4);
        __ cmp_and_brx_short(count, O4, Assembler::less, Assembler::pt, L_block_copy);
        __ sub(count, prefetch_count, count);

        (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy_prefetch, true, true);
        __ add(count, prefetch_count, count); // restore count

      } // prefetch_count > 0

      (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy, false, true);
      __ add(count, (tail_size>>log2_elem_size), count); // restore count

      __ wrasi(G0, Assembler::ASI_PRIMARY_NOFAULT);
      // BIS needs membar.
      __ membar(Assembler::StoreLoad);
      // Copy tail
      __ ba_short(L_copy);

      __ BIND(L_skip_block_copy);
    } // UseBlockCopy

    if (prefetch_count > 0) { // rounded up to one iteration count
      // Do prefetching only if copy size is bigger
      // than prefetch distance.
      __ set(prefetch_count, O4);
      __ cmp_and_brx_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_copy);
      __ sub(count, prefetch_count, count);

      Label L_copy_prefetch;
      (this->*copy_loop_func)(from, to, count, count_dec, L_copy_prefetch, true, false);
      __ add(count, prefetch_count, count); // restore count

    } // prefetch_count > 0

    (this->*copy_loop_func)(from, to, count, count_dec, L_copy, false, false);
  }
  //
  // Helper methods for copy_16_bytes_forward_with_shift()
  //
  void copy_16_bytes_shift_loop(Register from, Register to, Register count, int count_dec,
                                Label& L_loop, bool use_prefetch, bool use_bis) {

    const Register left_shift  = G1; // left  shift bit counter
    const Register right_shift = G5; // right shift bit counter

    __ align(OptoLoopAlignment);
    __ BIND(L_loop);
    if (use_prefetch) {
      if (ArraycopySrcPrefetchDistance > 0) {
        __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
      }
      if (ArraycopyDstPrefetchDistance > 0) {
        __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
      }
    }
    __ ldx(from, 0, O4);
    __ ldx(from, 8, G4);
    __ inc(to, 16);
    __ inc(from, 16);
    __ deccc(count, count_dec); // Can we do next iteration after this one?
    __ srlx(O4, right_shift, G3);
    __ bset(G3, O3);
    __ sllx(O4, left_shift,  O4);
    __ srlx(G4, right_shift, G3);
    __ bset(G3, O4);
    if (use_bis) {
      __ stxa(O3, to, -16);
      __ stxa(O4, to, -8);
    } else {
      __ stx(O3, to, -16);
      __ stx(O4, to, -8);
    }
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
    __ delayed()->sllx(G4, left_shift,  O3);
  }

  // Copy big chunks forward with shift
  //
  // Inputs:
  //   from      - source arrays
  //   to        - destination array aligned to 8-bytes
  //   count     - elements count to copy >= the count equivalent to 16 bytes
  //   count_dec - elements count's decrement equivalent to 16 bytes
  //   L_copy_bytes - copy exit label
  //
  void copy_16_bytes_forward_with_shift(Register from, Register to,
                     Register count, int log2_elem_size, Label& L_copy_bytes) {
    Label L_aligned_copy, L_copy_last_bytes;
    assert(log2_elem_size <= 3, "the following code should be changed");
    int count_dec = 16>>log2_elem_size;

    // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
    __ andcc(from, 7, G1); // misaligned bytes
    __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
    __ delayed()->nop();

    const Register left_shift  = G1; // left  shift bit counter
    const Register right_shift = G5; // right shift bit counter

    __ sll(G1, LogBitsPerByte, left_shift);
    __ mov(64, right_shift);
    __ sub(right_shift, left_shift, right_shift);

    //
    // Load 2 aligned 8-bytes chunks and use one from previous iteration
    // to form 2 aligned 8-bytes chunks to store.
    //
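    // One iteration of the shift-and-merge step described above, as a C
    // sketch (illustrative): each aligned 8-byte store is assembled from two
    // adjacent aligned loads,
    //
    //   uint64_t next = load64(from);  uint64_t next2 = load64(from + 8);
    //   store64(to,     (prev << left_shift) | (next  >> right_shift));
    //   store64(to + 8, (next << left_shift) | (next2 >> right_shift));
    //   prev = next2;
    //
    // where left_shift + right_shift == 64 encodes the misalignment in bits.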
    __ dec(count, count_dec);   // Pre-decrement 'count'
    __ andn(from, 7, from);     // Align address
    __ ldx(from, 0, O3);
    __ inc(from, 8);
    __ sllx(O3, left_shift,  O3);

    disjoint_copy_core(from, to, count, log2_elem_size, 16, &StubGenerator::copy_16_bytes_shift_loop);

    __ inccc(count, count_dec>>1 ); // + 8 bytes
    __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
    __ delayed()->inc(count, count_dec>>1); // restore 'count'

    // copy 8 bytes, part of them already loaded in O3
    __ ldx(from, 0, O4);
    __ inc(to, 8);
    __ inc(from, 8);
    __ srlx(O4, right_shift, G3);
    __ bset(O3, G3);
    __ stx(G3, to, -8);

    __ BIND(L_copy_last_bytes);
    __ srl(right_shift, LogBitsPerByte, right_shift); // misaligned bytes
    __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
    __ delayed()->sub(from, right_shift, from);       // restore address

    __ BIND(L_aligned_copy);
  }
  // Copy big chunks backward with shift
  //
  // Inputs:
  //   end_from  - source arrays end address
  //   end_to    - destination array end address aligned to 8-bytes
  //   count     - elements count to copy >= the count equivalent to 16 bytes
  //   count_dec - elements count's decrement equivalent to 16 bytes
  //   L_aligned_copy - aligned copy exit label
  //   L_copy_bytes   - copy exit label
  //
  void copy_16_bytes_backward_with_shift(Register end_from, Register end_to,
                     Register count, int count_dec,
                     Label& L_aligned_copy, Label& L_copy_bytes) {
    Label L_loop, L_copy_last_bytes;

    // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
      __ andcc(end_from, 7, G1); // misaligned bytes
      __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
      __ delayed()->deccc(count, count_dec); // Pre-decrement 'count'

    const Register left_shift  = G1; // left  shift bit counter
    const Register right_shift = G5; // right shift bit counter

      __ sll(G1, LogBitsPerByte, left_shift);
      __ mov(64, right_shift);
      __ sub(right_shift, left_shift, right_shift);

    //
    // Load 2 aligned 8-bytes chunks and use one from previous iteration
    // to form 2 aligned 8-bytes chunks to store.
    //
      __ andn(end_from, 7, end_from);     // Align address
      __ ldx(end_from, 0, O3);
      __ align(OptoLoopAlignment);
    __ BIND(L_loop);
      __ ldx(end_from, -8, O4);
      __ deccc(count, count_dec); // Can we do next iteration after this one?
      __ ldx(end_from, -16, G4);
      __ dec(end_to, 16);
      __ dec(end_from, 16);
      __ srlx(O3, right_shift, O3);
      __ sllx(O4, left_shift,  G3);
      __ bset(G3, O3);
      __ stx(O3, end_to, 8);
      __ srlx(O4, right_shift, O4);
      __ sllx(G4, left_shift,  G3);
      __ bset(G3, O4);
      __ stx(O4, end_to, 0);
      __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
      __ delayed()->mov(G4, O3);

      __ inccc(count, count_dec>>1 ); // + 8 bytes
      __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
      __ delayed()->inc(count, count_dec>>1); // restore 'count'

      // copy 8 bytes, part of them already loaded in O3
      __ ldx(end_from, -8, O4);
      __ dec(end_to, 8);
      __ dec(end_from, 8);
      __ srlx(O3, right_shift, O3);
      __ sllx(O4, left_shift,  G3);
      __ bset(O3, G3);
      __ stx(G3, end_to, 0);

    __ BIND(L_copy_last_bytes);
      __ srl(left_shift, LogBitsPerByte, left_shift);    // misaligned bytes
      __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
      __ delayed()->add(end_from, left_shift, end_from); // restore address
  }
  //
  //  Generate stub for disjoint byte copy.  If "aligned" is true, the
  //  "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  O0
  //      to:    O1
  //      count: O2 treated as signed
  //
  address generate_disjoint_byte_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_skip_alignment, L_align;
    Label L_copy_byte, L_copy_byte_loop, L_exit;

    const Register from      = O0;   // source array address
    const Register to        = O1;   // destination array address
    const Register count     = O2;   // elements count
    const Register offset    = O5;   // offset from start of arrays
    // O3, O4, G3, G4 are used as temp registers

    assert_clean_int(count, O3);     // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    // for short arrays, just do single element copy
    __ cmp(count, 23); // 16 + 7
    __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
    __ delayed()->mov(G0, offset);

    if (aligned) {
      // 'aligned' == true when it is known statically during compilation
      // of this arraycopy call site that both 'from' and 'to' addresses
      // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
      //
      // Aligned arrays have 4 bytes alignment in 32-bits VM
      // and 8 bytes - in 64-bits VM. So we do it only for 32-bits VM
      //
#ifndef _LP64
      // copy a 4-bytes word if necessary to align 'to' to 8 bytes
      __ andcc(to, 7, G0);
      __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment);
      __ delayed()->ld(from, 0, O3);
      __ inc(from, 4);
      __ inc(to, 4);
      __ dec(count, 4);
      __ st(O3, to, -4);
    __ BIND(L_skip_alignment);
#endif
    } else {
      // copy bytes to align 'to' on 8 byte boundary
      __ andcc(to, 7, G1); // misaligned bytes
      __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
      __ delayed()->neg(G1);
      __ inc(G1, 8);       // bytes need to copy to next 8-bytes alignment
      __ sub(count, G1, count);
    __ BIND(L_align);
      __ ldub(from, 0, O3);
      __ deccc(G1);
      __ inc(from);
      __ stb(O3, to, 0);
      __ br(Assembler::notZero, false, Assembler::pt, L_align);
      __ delayed()->inc(to);
    __ BIND(L_skip_alignment);
    }
#ifdef _LP64
    if (!aligned)
#endif
    {
      // Copy with shift 16 bytes per iteration if arrays do not have
      // the same alignment mod 8, otherwise fall through to the next
      // code for aligned copy.
      // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
      // Also jump over aligned copy after the copy with shift completed.

      copy_16_bytes_forward_with_shift(from, to, count, 0, L_copy_byte);
    }

    // Both arrays are 8 bytes aligned, copy 16 bytes at a time
      __ and3(count, 7, G4); // Save count
      __ srl(count, 3, count);
      generate_disjoint_long_copy_core(aligned);
      __ mov(G4, count);     // Restore count

    // copy tailing bytes
    __ BIND(L_copy_byte);
      __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
      __ align(OptoLoopAlignment);
    __ BIND(L_copy_byte_loop);
      __ ldub(from, offset, O3);
      __ deccc(count);
      __ stb(O3, to, offset);
      __ brx(Assembler::notZero, false, Assembler::pt, L_copy_byte_loop);
      __ delayed()->inc(offset);

    __ BIND(L_exit);
      // O3, O4 are used as temp registers
      inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
      __ retl();
      __ delayed()->mov(G0, O0); // return 0
    return start;
  }
  1365   //
  1366   //  Generate stub for conjoint byte copy.  If "aligned" is true, the
  1367   //  "from" and "to" addresses are assumed to be heapword aligned.
  1368   //
  1369   // Arguments for generated stub:
  1370   //      from:  O0
  1371   //      to:    O1
  1372   //      count: O2 treated as signed
  1373   //
  1374   address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
  1375                                       address *entry, const char *name) {
  1376     // Do reverse copy.
  1378     __ align(CodeEntryAlignment);
  1379     StubCodeMark mark(this, "StubRoutines", name);
  1380     address start = __ pc();
  1382     Label L_skip_alignment, L_align, L_aligned_copy;
  1383     Label L_copy_byte, L_copy_byte_loop, L_exit;
  1385     const Register from      = O0;   // source array address
  1386     const Register to        = O1;   // destination array address
  1387     const Register count     = O2;   // elements count
  1388     const Register end_from  = from; // source array end address
  1389     const Register end_to    = to;   // destination array end address
  1391     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
  1393     if (entry != NULL) {
  1394       *entry = __ pc();
  1395       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
  1396       BLOCK_COMMENT("Entry:");
  1399     array_overlap_test(nooverlap_target, 0);
  1401     __ add(to, count, end_to);       // offset after last copied element
  1403     // for short arrays, just do single element copy
  1404     __ cmp(count, 23); // 16 + 7
  1405     __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
  1406     __ delayed()->add(from, count, end_from);
  1409       // Align the end of the arrays since they may not be aligned even
  1410       // when the arrays themselves are aligned.
  1412       // copy bytes to align 'end_to' on 8 byte boundary
  1413       __ andcc(end_to, 7, G1); // misaligned bytes
  1414       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
  1415       __ delayed()->nop();
  1416       __ sub(count, G1, count);
  1417     __ BIND(L_align);
  1418       __ dec(end_from);
  1419       __ dec(end_to);
  1420       __ ldub(end_from, 0, O3);
  1421       __ deccc(G1);
  1422       __ brx(Assembler::notZero, false, Assembler::pt, L_align);
  1423       __ delayed()->stb(O3, end_to, 0);
  1424     __ BIND(L_skip_alignment);
  1426 #ifdef _LP64
  1427     if (aligned) {
  1428       // Both arrays are aligned to 8-bytes in 64-bits VM.
  1429       // The 'count' is decremented in copy_16_bytes_backward_with_shift()
  1430       // in unaligned case.
  1431       __ dec(count, 16);
  1432     } else
  1433 #endif
  1435       // Copy with shift 16 bytes per iteration if arrays do not have
  1436       // the same alignment mod 8, otherwise jump to the next
  1437       // code for aligned copy (and subtracting 16 from 'count' before the jump).
  1438       // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
  1439       // Also jump over aligned copy after the copy with shift completed.
  1441       copy_16_bytes_backward_with_shift(end_from, end_to, count, 16,
  1442                                         L_aligned_copy, L_copy_byte);
  1444     // copy 16 elements (16 bytes) at a time
  1445       __ align(OptoLoopAlignment);
  1446     __ BIND(L_aligned_copy);
  1447       __ dec(end_from, 16);
  1448       __ ldx(end_from, 8, O3);
  1449       __ ldx(end_from, 0, O4);
  1450       __ dec(end_to, 16);
  1451       __ deccc(count, 16);
  1452       __ stx(O3, end_to, 8);
  1453       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
  1454       __ delayed()->stx(O4, end_to, 0);
  1455       __ inc(count, 16);
  1457     // copy 1 element (1 byte) at a time
  1458     __ BIND(L_copy_byte);
  1459       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
  1460       __ align(OptoLoopAlignment);
  1461     __ BIND(L_copy_byte_loop);
  1462       __ dec(end_from);
  1463       __ dec(end_to);
  1464       __ ldub(end_from, 0, O4);
  1465       __ deccc(count);
  1466       __ brx(Assembler::greater, false, Assembler::pt, L_copy_byte_loop);
  1467       __ delayed()->stb(O4, end_to, 0);
  1469     __ BIND(L_exit);
  1470     // O3, O4 are used as temp registers
  1471     inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
  1472     __ retl();
  1473     __ delayed()->mov(G0, O0); // return 0
  1474     return start;
  1477   //
  1478   //  Generate stub for disjoint short copy.  If "aligned" is true, the
  1479   //  "from" and "to" addresses are assumed to be heapword aligned.
  1480   //
  1481   // Arguments for generated stub:
  1482   //      from:  O0
  1483   //      to:    O1
  1484   //      count: O2 treated as signed
  1485   //
  1486   address generate_disjoint_short_copy(bool aligned, address *entry, const char * name) {
  1487     __ align(CodeEntryAlignment);
  1488     StubCodeMark mark(this, "StubRoutines", name);
  1489     address start = __ pc();
  1491     Label L_skip_alignment, L_skip_alignment2;
  1492     Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
  1494     const Register from      = O0;   // source array address
  1495     const Register to        = O1;   // destination array address
  1496     const Register count     = O2;   // elements count
  1497     const Register offset    = O5;   // offset from start of arrays
  1498     // O3, O4, G3, G4 are used as temp registers
  1500     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
  1502     if (entry != NULL) {
  1503       *entry = __ pc();
  1504       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
  1505       BLOCK_COMMENT("Entry:");
  1508     // for short arrays, just do single element copy
  1509     __ cmp(count, 11); // 8 + 3  (22 bytes)
  1510     __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
  1511     __ delayed()->mov(G0, offset);
  1513     if (aligned) {
  1514       // 'aligned' == true when it is known statically during compilation
  1515       // of this arraycopy call site that both 'from' and 'to' addresses
  1516       // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
  1517       //
  1518       // Aligned arrays have 4 bytes alignment in 32-bits VM
  1519       // and 8 bytes - in 64-bits VM.
  1520       //
  1521 #ifndef _LP64
  1522       // copy one 2-element word if necessary to align 'to' to 8 bytes
  1523       __ andcc(to, 7, G0);
  1524       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
  1525       __ delayed()->ld(from, 0, O3);
  1526       __ inc(from, 4);
  1527       __ inc(to, 4);
  1528       __ dec(count, 2);
  1529       __ st(O3, to, -4);
  1530     __ BIND(L_skip_alignment);
  1531 #endif
  1532     } else {
  1533       // copy 1 element if necessary to align 'to' on a 4-byte boundary
  1534       __ andcc(to, 3, G0);
  1535       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
  1536       __ delayed()->lduh(from, 0, O3);
  1537       __ inc(from, 2);
  1538       __ inc(to, 2);
  1539       __ dec(count);
  1540       __ sth(O3, to, -2);
  1541     __ BIND(L_skip_alignment);
  1543       // copy 2 elements to align 'to' on an 8-byte boundary
  1544       __ andcc(to, 7, G0);
  1545       __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
  1546       __ delayed()->lduh(from, 0, O3);
  1547       __ dec(count, 2);
  1548       __ lduh(from, 2, O4);
  1549       __ inc(from, 4);
  1550       __ inc(to, 4);
  1551       __ sth(O3, to, -4);
  1552       __ sth(O4, to, -2);
  1553     __ BIND(L_skip_alignment2);
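             // For illustration: if 'to' % 8 == 2, the first block above copies
             // one element ('to' % 8 becomes 4) and the second copies two more,
             // leaving 'to' 8-byte aligned for the bulk loops below.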
  1555 #ifdef _LP64
  1556     if (!aligned)
  1557 #endif
  1559       // Copy with shift 16 bytes per iteration if arrays do not have
  1560       // the same alignment mod 8, otherwise fall through to the next
  1561       // code for aligned copy.
  1562       // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
  1563       // Also jump over aligned copy after the copy with shift completed.
  1565       copy_16_bytes_forward_with_shift(from, to, count, 1, L_copy_2_bytes);
  1568     // Both arrays are 8-byte aligned; copy 16 bytes at a time
  1569       __ and3(count, 3, G4); // Save
  1570       __ srl(count, 2, count);
  1571      generate_disjoint_long_copy_core(aligned);
  1572       __ mov(G4, count); // restore
  1574     // copy 1 element at a time
  1575     __ BIND(L_copy_2_bytes);
  1576       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
  1577       __ align(OptoLoopAlignment);
  1578     __ BIND(L_copy_2_bytes_loop);
  1579       __ lduh(from, offset, O3);
  1580       __ deccc(count);
  1581       __ sth(O3, to, offset);
  1582       __ brx(Assembler::notZero, false, Assembler::pt, L_copy_2_bytes_loop);
  1583       __ delayed()->inc(offset, 2);
  1585     __ BIND(L_exit);
  1586       // O3, O4 are used as temp registers
  1587       inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
  1588       __ retl();
  1589       __ delayed()->mov(G0, O0); // return 0
  1590     return start;
  1593   //
  1594   //  Generate stub for array fill (byte, short or int element type).  If
  1595   //  "aligned" is true, the "to" address is assumed to be heapword aligned.
  1596   //
  1597   // Arguments for generated stub:
  1598   //      to:    O0
  1599   //      value: O1
  1600   //      count: O2 treated as signed
  1601   //
  1602   address generate_fill(BasicType t, bool aligned, const char* name) {
  1603     __ align(CodeEntryAlignment);
  1604     StubCodeMark mark(this, "StubRoutines", name);
  1605     address start = __ pc();
  1607     const Register to        = O0;   // destination array address
  1608     const Register value     = O1;   // fill value
  1609     const Register count     = O2;   // elements count
  1610     // O3 is used as a temp register
  1612     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
  1614     Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
  1615     Label L_fill_2_bytes, L_fill_elements, L_fill_32_bytes;
  1617     int shift = -1;
  1618     switch (t) {
  1619       case T_BYTE:
  1620         shift = 2;
  1621         break;
  1622       case T_SHORT:
  1623         shift = 1;
  1624         break;
  1625       case T_INT:
  1626         shift = 0;
  1627         break;
  1628       default: ShouldNotReachHere();
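           // 'shift' is log2 of the elements per 4-byte word: one stw fills
           // 1 << shift elements and the 32-byte loop fills 8 << shift elements
           // (e.g. T_SHORT: 2 per word, 16 per loop iteration).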
  1631     BLOCK_COMMENT("Entry:");
  1633     if (t == T_BYTE) {
  1634       // Zero extend value
  1635       __ and3(value, 0xff, value);
  1636       __ sllx(value, 8, O3);
  1637       __ or3(value, O3, value);
  1639     if (t == T_SHORT) {
  1640       // Zero extend value
  1641       __ sllx(value, 48, value);
  1642       __ srlx(value, 48, value);
  1644     if (t == T_BYTE || t == T_SHORT) {
  1645       __ sllx(value, 16, O3);
  1646       __ or3(value, O3, value);
  1649     __ cmp(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
  1650     __ brx(Assembler::lessUnsigned, false, Assembler::pn, L_fill_elements); // use unsigned cmp
  1651     __ delayed()->andcc(count, 1, G0);
  1653     if (!aligned && (t == T_BYTE || t == T_SHORT)) {
  1654       // align destination address at a 4-byte boundary
  1655       if (t == T_BYTE) {
  1656         // One-byte misalignment happens only for byte arrays
  1657         __ andcc(to, 1, G0);
  1658         __ br(Assembler::zero, false, Assembler::pt, L_skip_align1);
  1659         __ delayed()->nop();
  1660         __ stb(value, to, 0);
  1661         __ inc(to, 1);
  1662         __ dec(count, 1);
  1663         __ BIND(L_skip_align1);
  1665       // Two-byte misalignment happens only for byte and short (char) arrays
  1666       __ andcc(to, 2, G0);
  1667       __ br(Assembler::zero, false, Assembler::pt, L_skip_align2);
  1668       __ delayed()->nop();
  1669       __ sth(value, to, 0);
  1670       __ inc(to, 2);
  1671       __ dec(count, 1 << (shift - 1));
  1672       __ BIND(L_skip_align2);
  1674 #ifdef _LP64
  1675     if (!aligned) {
  1676 #endif
  1677     // align to 8 bytes; we know we are 4-byte aligned to start
  1678     __ andcc(to, 7, G0);
  1679     __ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes);
  1680     __ delayed()->nop();
  1681     __ stw(value, to, 0);
  1682     __ inc(to, 4);
  1683     __ dec(count, 1 << shift);
  1684     __ BIND(L_fill_32_bytes);
  1685 #ifdef _LP64
  1687 #endif
  1689     if (t == T_INT) {
  1690       // Zero extend value
  1691       __ srl(value, 0, value);
  1693     if (t == T_BYTE || t == T_SHORT || t == T_INT) {
  1694       __ sllx(value, 32, O3);
  1695       __ or3(value, O3, value);
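             // For illustration (T_BYTE, value = 0xAB): the steps above widen it
             // to 0xABAB, 0xABABABAB and finally 0xABABABABABABABAB, so each stx
             // below stores eight copies of the fill byte.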
  1698     Label L_check_fill_8_bytes;
  1699     // Fill 32-byte chunks
  1700     __ subcc(count, 8 << shift, count);
  1701     __ brx(Assembler::less, false, Assembler::pt, L_check_fill_8_bytes);
  1702     __ delayed()->nop();
  1704     Label L_fill_32_bytes_loop, L_fill_4_bytes;
  1705     __ align(16);
  1706     __ BIND(L_fill_32_bytes_loop);
  1708     __ stx(value, to, 0);
  1709     __ stx(value, to, 8);
  1710     __ stx(value, to, 16);
  1711     __ stx(value, to, 24);
  1713     __ subcc(count, 8 << shift, count);
  1714     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_fill_32_bytes_loop);
  1715     __ delayed()->add(to, 32, to);
  1717     __ BIND(L_check_fill_8_bytes);
  1718     __ addcc(count, 8 << shift, count);
  1719     __ brx(Assembler::zero, false, Assembler::pn, L_exit);
  1720     __ delayed()->subcc(count, 1 << (shift + 1), count);
  1721     __ brx(Assembler::less, false, Assembler::pn, L_fill_4_bytes);
  1722     __ delayed()->andcc(count, 1<<shift, G0);
  1724     //
  1725     // length is too short, just fill 8 bytes at a time
  1726     //
  1727     Label L_fill_8_bytes_loop;
  1728     __ BIND(L_fill_8_bytes_loop);
  1729     __ stx(value, to, 0);
  1730     __ subcc(count, 1 << (shift + 1), count);
  1731     __ brx(Assembler::greaterEqual, false, Assembler::pn, L_fill_8_bytes_loop);
  1732     __ delayed()->add(to, 8, to);
  1734     // fill trailing 4 bytes
  1735     __ andcc(count, 1<<shift, G0);  // in delay slot of branches
  1736     if (t == T_INT) {
  1737       __ BIND(L_fill_elements);
  1739     __ BIND(L_fill_4_bytes);
  1740     __ brx(Assembler::zero, false, Assembler::pt, L_fill_2_bytes);
  1741     if (t == T_BYTE || t == T_SHORT) {
  1742       __ delayed()->andcc(count, 1<<(shift-1), G0);
  1743     } else {
  1744       __ delayed()->nop();
  1746     __ stw(value, to, 0);
  1747     if (t == T_BYTE || t == T_SHORT) {
  1748       __ inc(to, 4);
  1749       // fill trailing 2 bytes
  1750       __ andcc(count, 1<<(shift-1), G0); // in delay slot of branches
  1751       __ BIND(L_fill_2_bytes);
  1752       __ brx(Assembler::zero, false, Assembler::pt, L_fill_byte);
  1753       __ delayed()->andcc(count, 1, count);
  1754       __ sth(value, to, 0);
  1755       if (t == T_BYTE) {
  1756         __ inc(to, 2);
  1757         // fill trailing byte
  1758         __ andcc(count, 1, count);  // in delay slot of branches
  1759         __ BIND(L_fill_byte);
  1760         __ brx(Assembler::zero, false, Assembler::pt, L_exit);
  1761         __ delayed()->nop();
  1762         __ stb(value, to, 0);
  1763       } else {
  1764         __ BIND(L_fill_byte);
  1766     } else {
  1767       __ BIND(L_fill_2_bytes);
  1769     __ BIND(L_exit);
  1770     __ retl();
  1771     __ delayed()->nop();
  1773     // Handle fills of less than 8 bytes.  Int is handled elsewhere.
  1774     if (t == T_BYTE) {
  1775       __ BIND(L_fill_elements);
  1776       Label L_fill_2, L_fill_4;
  1777       // in delay slot __ andcc(count, 1, G0);
  1778       __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
  1779       __ delayed()->andcc(count, 2, G0);
  1780       __ stb(value, to, 0);
  1781       __ inc(to, 1);
  1782       __ BIND(L_fill_2);
  1783       __ brx(Assembler::zero, false, Assembler::pt, L_fill_4);
  1784       __ delayed()->andcc(count, 4, G0);
  1785       __ stb(value, to, 0);
  1786       __ stb(value, to, 1);
  1787       __ inc(to, 2);
  1788       __ BIND(L_fill_4);
  1789       __ brx(Assembler::zero, false, Assembler::pt, L_exit);
  1790       __ delayed()->nop();
  1791       __ stb(value, to, 0);
  1792       __ stb(value, to, 1);
  1793       __ stb(value, to, 2);
  1794       __ retl();
  1795       __ delayed()->stb(value, to, 3);
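             // For illustration: count = 7 takes all three stores above: one stb
             // (bit 0), two stbs (bit 1) and four stbs (bit 2), seven bytes in all.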
  1798     if (t == T_SHORT) {
  1799       Label L_fill_2;
  1800       __ BIND(L_fill_elements);
  1801       // in delay slot __ andcc(count, 1, G0);
  1802       __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
  1803       __ delayed()->andcc(count, 2, G0);
  1804       __ sth(value, to, 0);
  1805       __ inc(to, 2);
  1806       __ BIND(L_fill_2);
  1807       __ brx(Assembler::zero, false, Assembler::pt, L_exit);
  1808       __ delayed()->nop();
  1809       __ sth(value, to, 0);
  1810       __ retl();
  1811       __ delayed()->sth(value, to, 2);
  1813     return start;
  1816   //
  1817   //  Generate stub for conjoint short copy.  If "aligned" is true, the
  1818   //  "from" and "to" addresses are assumed to be heapword aligned.
  1819   //
  1820   // Arguments for generated stub:
  1821   //      from:  O0
  1822   //      to:    O1
  1823   //      count: O2 treated as signed
  1824   //
  1825   address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
  1826                                        address *entry, const char *name) {
  1827     // Do reverse copy.
  1829     __ align(CodeEntryAlignment);
  1830     StubCodeMark mark(this, "StubRoutines", name);
  1831     address start = __ pc();
  1833     Label L_skip_alignment, L_skip_alignment2, L_aligned_copy;
  1834     Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
  1836     const Register from      = O0;   // source array address
  1837     const Register to        = O1;   // destination array address
  1838     const Register count     = O2;   // elements count
  1839     const Register end_from  = from; // source array end address
  1840     const Register end_to    = to;   // destination array end address
  1842     const Register byte_count = O3;  // bytes count to copy
  1844     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
  1846     if (entry != NULL) {
  1847       *entry = __ pc();
  1848       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
  1849       BLOCK_COMMENT("Entry:");
  1852     array_overlap_test(nooverlap_target, 1);
  1854     __ sllx(count, LogBytesPerShort, byte_count);
  1855     __ add(to, byte_count, end_to);  // offset after last copied element
  1857     // for short arrays, just do single element copy
  1858     __ cmp(count, 11); // 8 + 3  (22 bytes)
  1859     __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
  1860     __ delayed()->add(from, byte_count, end_from);
  1863       // Align the end of the arrays since they may not be aligned even
  1864       // when the arrays themselves are aligned.
  1866       // copy 1 element if necessary to align 'end_to' on a 4-byte boundary
  1867       __ andcc(end_to, 3, G0);
  1868       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
  1869       __ delayed()->lduh(end_from, -2, O3);
  1870       __ dec(end_from, 2);
  1871       __ dec(end_to, 2);
  1872       __ dec(count);
  1873       __ sth(O3, end_to, 0);
  1874     __ BIND(L_skip_alignment);
  1876       // copy 2 elements to align 'end_to' on an 8-byte boundary
  1877       __ andcc(end_to, 7, G0);
  1878       __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
  1879       __ delayed()->lduh(end_from, -2, O3);
  1880       __ dec(count, 2);
  1881       __ lduh(end_from, -4, O4);
  1882       __ dec(end_from, 4);
  1883       __ dec(end_to, 4);
  1884       __ sth(O3, end_to, 2);
  1885       __ sth(O4, end_to, 0);
  1886     __ BIND(L_skip_alignment2);
  1888 #ifdef _LP64
  1889     if (aligned) {
  1890       // Both arrays are aligned to 8-bytes in 64-bits VM.
  1891       // The 'count' is decremented in copy_16_bytes_backward_with_shift()
  1892       // in unaligned case.
  1893       __ dec(count, 8);
  1894     } else
  1895 #endif
  1897       // Copy with shift 16 bytes per iteration if arrays do not have
  1898       // the same alignment mod 8, otherwise jump to the next
  1899       // code for aligned copy (and subtracting 8 from 'count' before the jump).
  1900       // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
  1901       // Also jump over aligned copy after the copy with shift completed.
  1903       copy_16_bytes_backward_with_shift(end_from, end_to, count, 8,
  1904                                         L_aligned_copy, L_copy_2_bytes);
  1906     // copy 8 elements (16 bytes) at a time
  1907       __ align(OptoLoopAlignment);
  1908     __ BIND(L_aligned_copy);
  1909       __ dec(end_from, 16);
  1910       __ ldx(end_from, 8, O3);
  1911       __ ldx(end_from, 0, O4);
  1912       __ dec(end_to, 16);
  1913       __ deccc(count, 8);
  1914       __ stx(O3, end_to, 8);
  1915       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
  1916       __ delayed()->stx(O4, end_to, 0);
  1917       __ inc(count, 8);
  1919     // copy 1 element (2 bytes) at a time
  1920     __ BIND(L_copy_2_bytes);
  1921       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
  1922     __ BIND(L_copy_2_bytes_loop);
  1923       __ dec(end_from, 2);
  1924       __ dec(end_to, 2);
  1925       __ lduh(end_from, 0, O4);
  1926       __ deccc(count);
  1927       __ brx(Assembler::greater, false, Assembler::pt, L_copy_2_bytes_loop);
  1928       __ delayed()->sth(O4, end_to, 0);
  1930     __ BIND(L_exit);
  1931     // O3, O4 are used as temp registers
  1932     inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
  1933     __ retl();
  1934     __ delayed()->mov(G0, O0); // return 0
  1935     return start;
  1938   //
  1939   // Helper methods for generate_disjoint_int_copy_core()
  1940   //
  1941   void copy_16_bytes_loop(Register from, Register to, Register count, int count_dec,
  1942                           Label& L_loop, bool use_prefetch, bool use_bis) {
  1944     __ align(OptoLoopAlignment);
  1945     __ BIND(L_loop);
  1946     if (use_prefetch) {
  1947       if (ArraycopySrcPrefetchDistance > 0) {
  1948         __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
  1950       if (ArraycopyDstPrefetchDistance > 0) {
  1951         __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
  1954     __ ldx(from, 4, O4);
  1955     __ ldx(from, 12, G4);
  1956     __ inc(to, 16);
  1957     __ inc(from, 16);
  1958     __ deccc(count, 4); // Can we do next iteration after this one?
  1960     __ srlx(O4, 32, G3);
  1961     __ bset(G3, O3);
  1962     __ sllx(O4, 32, O4);
  1963     __ srlx(G4, 32, G3);
  1964     __ bset(G3, O4);
  1965     if (use_bis) {
  1966       __ stxa(O3, to, -16);
  1967       __ stxa(O4, to, -8);
  1968     } else {
  1969       __ stx(O3, to, -16);
  1970       __ stx(O4, to, -8);
  1972     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
  1973     __ delayed()->sllx(G4, 32,  O3);
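           // The two ldx above read the 8-byte aligned addresses from+4 and
           // from+12 (the caller consumed the first unaligned int and left it in
           // the upper half of O3), and the srlx/sllx/bset shuffle re-packs the
           // halves so that both 8-byte stores hit aligned addresses.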
  1977   //
  1978   //  Generate core code for disjoint int copy (and oop copy on 32-bit).
  1979   //  If "aligned" is true, the "from" and "to" addresses are assumed
  1980   //  to be heapword aligned.
  1981   //
  1982   // Arguments:
  1983   //      from:  O0
  1984   //      to:    O1
  1985   //      count: O2 treated as signed
  1986   //
  1987   void generate_disjoint_int_copy_core(bool aligned) {
  1989     Label L_skip_alignment, L_aligned_copy;
  1990     Label L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
  1992     const Register from      = O0;   // source array address
  1993     const Register to        = O1;   // destination array address
  1994     const Register count     = O2;   // elements count
  1995     const Register offset    = O5;   // offset from start of arrays
  1996     // O3, O4, G3, G4 are used as temp registers
  1998     // 'aligned' == true when it is known statically during compilation
  1999     // of this arraycopy call site that both 'from' and 'to' addresses
  2000     // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
  2001     //
  2002     // Aligned arrays have 4 bytes alignment in 32-bits VM
  2003     // and 8 bytes - in 64-bits VM.
  2004     //
  2005 #ifdef _LP64
  2006     if (!aligned)
  2007 #endif
  2009       // The next check could be put under 'ifndef' since the code in
  2010     // generate_disjoint_long_copy_core() has its own checks and sets 'offset'.
  2012       // for short arrays, just do single element copy
  2013       __ cmp(count, 5); // 4 + 1 (20 bytes)
  2014       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
  2015       __ delayed()->mov(G0, offset);
  2017       // copy 1 element to align 'to' on an 8 byte boundary
  2018       __ andcc(to, 7, G0);
  2019       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
  2020       __ delayed()->ld(from, 0, O3);
  2021       __ inc(from, 4);
  2022       __ inc(to, 4);
  2023       __ dec(count);
  2024       __ st(O3, to, -4);
  2025     __ BIND(L_skip_alignment);
  2027     // if arrays have same alignment mod 8, do 4 elements copy
  2028       __ andcc(from, 7, G0);
  2029       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
  2030       __ delayed()->ld(from, 0, O3);
  2032     //
  2033     // Load 2 aligned 8-bytes chunks and use one from previous iteration
  2034     // to form 2 aligned 8-bytes chunks to store.
  2035     //
  2036     // copy_16_bytes_forward_with_shift() is not used here since this
  2037     // code is more optimal.
  2039     // copy with shift 4 elements (16 bytes) at a time
  2040       __ dec(count, 4);   // The cmp at the beginning guarantees count >= 4
  2041       __ sllx(O3, 32,  O3);
  2043       disjoint_copy_core(from, to, count, 2, 16, copy_16_bytes_loop);
  2045       __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
  2046       __ delayed()->inc(count, 4); // restore 'count'
  2048     __ BIND(L_aligned_copy);
  2049     } // !aligned
  2051     // copy 4 elements (16 bytes) at a time
  2052       __ and3(count, 1, G4); // Save
  2053       __ srl(count, 1, count);
  2054      generate_disjoint_long_copy_core(aligned);
  2055       __ mov(G4, count);     // Restore
  2057     // copy 1 element at a time
  2058     __ BIND(L_copy_4_bytes);
  2059       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
  2060     __ BIND(L_copy_4_bytes_loop);
  2061       __ ld(from, offset, O3);
  2062       __ deccc(count);
  2063       __ st(O3, to, offset);
  2064       __ brx(Assembler::notZero, false, Assembler::pt, L_copy_4_bytes_loop);
  2065       __ delayed()->inc(offset, 4);
  2066     __ BIND(L_exit);
  2069   //
  2070   //  Generate stub for disjoint int copy.  If "aligned" is true, the
  2071   //  "from" and "to" addresses are assumed to be heapword aligned.
  2072   //
  2073   // Arguments for generated stub:
  2074   //      from:  O0
  2075   //      to:    O1
  2076   //      count: O2 treated as signed
  2077   //
  2078   address generate_disjoint_int_copy(bool aligned, address *entry, const char *name) {
  2079     __ align(CodeEntryAlignment);
  2080     StubCodeMark mark(this, "StubRoutines", name);
  2081     address start = __ pc();
  2083     const Register count = O2;
  2084     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
  2086     if (entry != NULL) {
  2087       *entry = __ pc();
  2088       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
  2089       BLOCK_COMMENT("Entry:");
  2092     generate_disjoint_int_copy_core(aligned);
  2094     // O3, O4 are used as temp registers
  2095     inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
  2096     __ retl();
  2097     __ delayed()->mov(G0, O0); // return 0
  2098     return start;
  2101   //
  2102   //  Generate core code for conjoint int copy (and oop copy on 32-bit).
  2103   //  If "aligned" is true, the "from" and "to" addresses are assumed
  2104   //  to be heapword aligned.
  2105   //
  2106   // Arguments:
  2107   //      from:  O0
  2108   //      to:    O1
  2109   //      count: O2 treated as signed
  2110   //
  2111   void generate_conjoint_int_copy_core(bool aligned) {
  2112     // Do reverse copy.
  2114     Label L_skip_alignment, L_aligned_copy;
  2115     Label L_copy_16_bytes,  L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
  2117     const Register from      = O0;   // source array address
  2118     const Register to        = O1;   // destination array address
  2119     const Register count     = O2;   // elements count
  2120     const Register end_from  = from; // source array end address
  2121     const Register end_to    = to;   // destination array end address
  2122     // O3, O4, O5, G3 are used as temp registers
  2124     const Register byte_count = O3;  // bytes count to copy
  2126       __ sllx(count, LogBytesPerInt, byte_count);
  2127       __ add(to, byte_count, end_to); // offset after last copied element
  2129       __ cmp(count, 5); // for short arrays, just do single element copy
  2130       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
  2131       __ delayed()->add(from, byte_count, end_from);
  2133     // copy 1 element to align 'to' on an 8 byte boundary
  2134       __ andcc(end_to, 7, G0);
  2135       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
  2136       __ delayed()->nop();
  2137       __ dec(count);
  2138       __ dec(end_from, 4);
  2139       __ dec(end_to,   4);
  2140       __ ld(end_from, 0, O4);
  2141       __ st(O4, end_to, 0);
  2142     __ BIND(L_skip_alignment);
  2144     // Check if 'end_from' and 'end_to' have the same alignment.
  2145       __ andcc(end_from, 7, G0);
  2146       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
  2147       __ delayed()->dec(count, 4); // The cmp at the start guarantees count >= 4
  2149     // copy with shift 4 elements (16 bytes) at a time
  2150     //
  2151     // Load 2 aligned 8-bytes chunks and use one from previous iteration
  2152     // to form 2 aligned 8-bytes chunks to store.
  2153     //
  2154       __ ldx(end_from, -4, O3);
  2155       __ align(OptoLoopAlignment);
  2156     __ BIND(L_copy_16_bytes);
  2157       __ ldx(end_from, -12, O4);
  2158       __ deccc(count, 4);
  2159       __ ldx(end_from, -20, O5);
  2160       __ dec(end_to, 16);
  2161       __ dec(end_from, 16);
  2162       __ srlx(O3, 32, O3);
  2163       __ sllx(O4, 32, G3);
  2164       __ bset(G3, O3);
  2165       __ stx(O3, end_to, 8);
  2166       __ srlx(O4, 32, O4);
  2167       __ sllx(O5, 32, G3);
  2168       __ bset(O4, G3);
  2169       __ stx(G3, end_to, 0);
  2170       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
  2171       __ delayed()->mov(O5, O3);
  2173       __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
  2174       __ delayed()->inc(count, 4);
  2176     // copy 4 elements (16 bytes) at a time
  2177       __ align(OptoLoopAlignment);
  2178     __ BIND(L_aligned_copy);
  2179       __ dec(end_from, 16);
  2180       __ ldx(end_from, 8, O3);
  2181       __ ldx(end_from, 0, O4);
  2182       __ dec(end_to, 16);
  2183       __ deccc(count, 4);
  2184       __ stx(O3, end_to, 8);
  2185       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
  2186       __ delayed()->stx(O4, end_to, 0);
  2187       __ inc(count, 4);
  2189     // copy 1 element (4 bytes) at a time
  2190     __ BIND(L_copy_4_bytes);
  2191       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
  2192     __ BIND(L_copy_4_bytes_loop);
  2193       __ dec(end_from, 4);
  2194       __ dec(end_to, 4);
  2195       __ ld(end_from, 0, O4);
  2196       __ deccc(count);
  2197       __ brx(Assembler::greater, false, Assembler::pt, L_copy_4_bytes_loop);
  2198       __ delayed()->st(O4, end_to, 0);
  2199     __ BIND(L_exit);
  2202   //
  2203   //  Generate stub for conjoint int copy.  If "aligned" is true, the
  2204   //  "from" and "to" addresses are assumed to be heapword aligned.
  2205   //
  2206   // Arguments for generated stub:
  2207   //      from:  O0
  2208   //      to:    O1
  2209   //      count: O2 treated as signed
  2210   //
  2211   address generate_conjoint_int_copy(bool aligned, address nooverlap_target,
  2212                                      address *entry, const char *name) {
  2213     __ align(CodeEntryAlignment);
  2214     StubCodeMark mark(this, "StubRoutines", name);
  2215     address start = __ pc();
  2217     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
  2219     if (entry != NULL) {
  2220       *entry = __ pc();
  2221       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
  2222       BLOCK_COMMENT("Entry:");
  2225     array_overlap_test(nooverlap_target, 2);
  2227     generate_conjoint_int_copy_core(aligned);
  2229     // O3, O4 are used as temp registers
  2230     inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
  2231     __ retl();
  2232     __ delayed()->mov(G0, O0); // return 0
  2233     return start;
  2236   //
  2237   // Helper methods for generate_disjoint_long_copy_core()
  2238   //
  2239   void copy_64_bytes_loop(Register from, Register to, Register count, int count_dec,
  2240                           Label& L_loop, bool use_prefetch, bool use_bis) {
  2241     __ align(OptoLoopAlignment);
  2242     __ BIND(L_loop);
  2243     for (int off = 0; off < 64; off += 16) {
  2244       if (use_prefetch && (off & 31) == 0) {
  2245         if (ArraycopySrcPrefetchDistance > 0) {
  2246           __ prefetch(from, ArraycopySrcPrefetchDistance+off, Assembler::severalReads);
  2248         if (ArraycopyDstPrefetchDistance > 0) {
  2249           __ prefetch(to, ArraycopyDstPrefetchDistance+off, Assembler::severalWritesAndPossiblyReads);
  2252       __ ldx(from,  off+0, O4);
  2253       __ ldx(from,  off+8, O5);
  2254       if (use_bis) {
  2255         __ stxa(O4, to,  off+0);
  2256         __ stxa(O5, to,  off+8);
  2257       } else {
  2258         __ stx(O4, to,  off+0);
  2259         __ stx(O5, to,  off+8);
  2262     __ deccc(count, 8);
  2263     __ inc(from, 64);
  2264     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
  2265     __ delayed()->inc(to, 64);
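           // The prefetch distances come from the -XX:ArraycopySrcPrefetchDistance
           // and -XX:ArraycopyDstPrefetchDistance product flags (0 disables them);
           // 'use_bis' selects block-initializing stxa stores, which avoid
           // fetching destination cache lines that will be completely overwritten
           // (the surrounding code is expected to set up the ASI and any membar).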
  2268   //
  2269   //  Generate core code for disjoint long copy (and oop copy on 64-bit).
  2270   //  "aligned" is ignored, because we must make the stronger
  2271   //  assumption that both addresses are always 64-bit aligned.
  2272   //
  2273   // Arguments:
  2274   //      from:  O0
  2275   //      to:    O1
  2276   //      count: O2 treated as signed
  2277   //
  2278   // count -= 2;
  2279   // if ( count >= 0 ) { // >= 2 elements
  2280   //   if ( count >= 6) { // >= 8 elements
  2281   //     count -= 6; // original count - 8
  2282   //     do {
  2283   //       copy_8_elements;
  2284   //       count -= 8;
  2285   //     } while ( count >= 0 );
  2286   //     count += 6;
  2287   //   }
  2288   //   if ( count >= 0 ) { // >= 2 elements
  2289   //     do {
  2290   //       copy_2_elements;
  2291   //     } while ( (count=count-2) >= 0 );
  2292   //   }
  2293   // }
  2294   // count += 2;
  2295   // if ( count != 0 ) { // 1 element left
  2296   //   copy_1_element;
  2297   // }
  2298   //
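         // For illustration, count = 13: the 64-byte loop copies 8 elements,
         // the 16-byte loop copies 4 more, and the 8-byte tail copies the 13th.
         //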
  2299   void generate_disjoint_long_copy_core(bool aligned) {
  2300     Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
  2301     const Register from    = O0;  // source array address
  2302     const Register to      = O1;  // destination array address
  2303     const Register count   = O2;  // elements count
  2304     const Register offset0 = O4;  // element offset
  2305     const Register offset8 = O5;  // next element offset
  2307     __ deccc(count, 2);
  2308     __ mov(G0, offset0);   // offset from start of arrays (0)
  2309     __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
  2310     __ delayed()->add(offset0, 8, offset8);
  2312     // Copy in 64-byte chunks
  2314     const Register from64 = O3;  // source address
  2315     const Register to64   = G3;  // destination address
  2316     __ subcc(count, 6, O3);
  2317     __ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes );
  2318     __ delayed()->mov(to,   to64);
  2319     // Now we can use O4(offset0), O5(offset8) as temps
  2320     __ mov(O3, count);
  2321     // count >= 0 (original count - 8)
  2322     __ mov(from, from64);
  2324     disjoint_copy_core(from64, to64, count, 3, 64, copy_64_bytes_loop);
  2326       // Restore O4(offset0), O5(offset8)
  2327       __ sub(from64, from, offset0);
  2328       __ inccc(count, 6); // restore count
  2329       __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
  2330       __ delayed()->add(offset0, 8, offset8);
  2332       // Copy in 16-byte chunks
  2333       __ align(OptoLoopAlignment);
  2334     __ BIND(L_copy_16_bytes);
  2335       __ ldx(from, offset0, O3);
  2336       __ ldx(from, offset8, G3);
  2337       __ deccc(count, 2);
  2338       __ stx(O3, to, offset0);
  2339       __ inc(offset0, 16);
  2340       __ stx(G3, to, offset8);
  2341       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
  2342       __ delayed()->inc(offset8, 16);
  2344       // Copy last 8 bytes
  2345     __ BIND(L_copy_8_bytes);
  2346       __ inccc(count, 2);
  2347       __ brx(Assembler::zero, true, Assembler::pn, L_exit );
  2348       __ delayed()->mov(offset0, offset8); // Set O5 used by other stubs
  2349       __ ldx(from, offset0, O3);
  2350       __ stx(O3, to, offset0);
  2351     __ BIND(L_exit);
  2354   //
  2355   //  Generate stub for disjoint long copy.
  2356   //  "aligned" is ignored, because we must make the stronger
  2357   //  assumption that both addresses are always 64-bit aligned.
  2358   //
  2359   // Arguments for generated stub:
  2360   //      from:  O0
  2361   //      to:    O1
  2362   //      count: O2 treated as signed
  2363   //
  2364   address generate_disjoint_long_copy(bool aligned, address *entry, const char *name) {
  2365     __ align(CodeEntryAlignment);
  2366     StubCodeMark mark(this, "StubRoutines", name);
  2367     address start = __ pc();
  2369     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
  2371     if (entry != NULL) {
  2372       *entry = __ pc();
  2373       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
  2374       BLOCK_COMMENT("Entry:");
  2377     generate_disjoint_long_copy_core(aligned);
  2379     // O3, O4 are used as temp registers
  2380     inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
  2381     __ retl();
  2382     __ delayed()->mov(G0, O0); // return 0
  2383     return start;
  2386   //
  2387   //  Generate core code for conjoint long copy (and oop copy on 64-bit).
  2388   //  "aligned" is ignored, because we must make the stronger
  2389   //  assumption that both addresses are always 64-bit aligned.
  2390   //
  2391   // Arguments:
  2392   //      from:  O0
  2393   //      to:    O1
  2394   //      count: O2 treated as signed
  2395   //
  2396   void generate_conjoint_long_copy_core(bool aligned) {
  2397     // Do reverse copy.
  2398     Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
  2399     const Register from    = O0;  // source array address
  2400     const Register to      = O1;  // destination array address
  2401     const Register count   = O2;  // elements count
  2402     const Register offset8 = O4;  // element offset
  2403     const Register offset0 = O5;  // previous element offset
  2405       __ subcc(count, 1, count);
  2406       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes );
  2407       __ delayed()->sllx(count, LogBytesPerLong, offset8);
  2408       __ sub(offset8, 8, offset0);
  2409       __ align(OptoLoopAlignment);
  2410     __ BIND(L_copy_16_bytes);
  2411       __ ldx(from, offset8, O2);
  2412       __ ldx(from, offset0, O3);
  2413       __ stx(O2, to, offset8);
  2414       __ deccc(offset8, 16);      // use offset8 as counter
  2415       __ stx(O3, to, offset0);
  2416       __ brx(Assembler::greater, false, Assembler::pt, L_copy_16_bytes);
  2417       __ delayed()->dec(offset0, 16);
  2419     __ BIND(L_copy_8_bytes);
  2420       __ brx(Assembler::negative, false, Assembler::pn, L_exit );
  2421       __ delayed()->nop();
  2422       __ ldx(from, 0, O3);
  2423       __ stx(O3, to, 0);
  2424     __ BIND(L_exit);
  2427   //  Generate stub for conjoint long copy.
  2428   //  "aligned" is ignored, because we must make the stronger
  2429   //  assumption that both addresses are always 64-bit aligned.
  2430   //
  2431   // Arguments for generated stub:
  2432   //      from:  O0
  2433   //      to:    O1
  2434   //      count: O2 treated as signed
  2435   //
  2436   address generate_conjoint_long_copy(bool aligned, address nooverlap_target,
  2437                                       address *entry, const char *name) {
  2438     __ align(CodeEntryAlignment);
  2439     StubCodeMark mark(this, "StubRoutines", name);
  2440     address start = __ pc();
  2442     assert(aligned, "Should always be aligned");
  2444     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
  2446     if (entry != NULL) {
  2447       *entry = __ pc();
  2448       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
  2449       BLOCK_COMMENT("Entry:");
  2452     array_overlap_test(nooverlap_target, 3);
  2454     generate_conjoint_long_copy_core(aligned);
  2456     // O3, O4 are used as temp registers
  2457     inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
  2458     __ retl();
  2459     __ delayed()->mov(G0, O0); // return 0
  2460     return start;
  2463   //  Generate stub for disjoint oop copy.  If "aligned" is true, the
  2464   //  "from" and "to" addresses are assumed to be heapword aligned.
  2465   //
  2466   // Arguments for generated stub:
  2467   //      from:  O0
  2468   //      to:    O1
  2469   //      count: O2 treated as signed
  2470   //
  2471   address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name,
  2472                                      bool dest_uninitialized = false) {
  2474     const Register from  = O0;  // source array address
  2475     const Register to    = O1;  // destination array address
  2476     const Register count = O2;  // elements count
  2478     __ align(CodeEntryAlignment);
  2479     StubCodeMark mark(this, "StubRoutines", name);
  2480     address start = __ pc();
  2482     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
  2484     if (entry != NULL) {
  2485       *entry = __ pc();
  2486       // caller can pass a 64-bit byte count here
  2487       BLOCK_COMMENT("Entry:");
  2490     // save arguments for barrier generation
  2491     __ mov(to, G1);
  2492     __ mov(count, G5);
  2493     gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
  2494   #ifdef _LP64
  2495     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
  2496     if (UseCompressedOops) {
  2497       generate_disjoint_int_copy_core(aligned);
  2498     } else {
  2499       generate_disjoint_long_copy_core(aligned);
  2501   #else
  2502     generate_disjoint_int_copy_core(aligned);
  2503   #endif
  2504     // O0 is used as temp register
  2505     gen_write_ref_array_post_barrier(G1, G5, O0);
  2507     // O3, O4 are used as temp registers
  2508     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
  2509     __ retl();
  2510     __ delayed()->mov(G0, O0); // return 0
  2511     return start;
  2514   //  Generate stub for conjoint oop copy.  If "aligned" is true, the
  2515   //  "from" and "to" addresses are assumed to be heapword aligned.
  2516   //
  2517   // Arguments for generated stub:
  2518   //      from:  O0
  2519   //      to:    O1
  2520   //      count: O2 treated as signed
  2521   //
  2522   address generate_conjoint_oop_copy(bool aligned, address nooverlap_target,
  2523                                      address *entry, const char *name,
  2524                                      bool dest_uninitialized = false) {
  2526     const Register from  = O0;  // source array address
  2527     const Register to    = O1;  // destination array address
  2528     const Register count = O2;  // elements count
  2530     __ align(CodeEntryAlignment);
  2531     StubCodeMark mark(this, "StubRoutines", name);
  2532     address start = __ pc();
  2534     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
  2536     if (entry != NULL) {
  2537       *entry = __ pc();
  2538       // caller can pass a 64-bit byte count here
  2539       BLOCK_COMMENT("Entry:");
  2542     array_overlap_test(nooverlap_target, LogBytesPerHeapOop);
  2544     // save arguments for barrier generation
  2545     __ mov(to, G1);
  2546     __ mov(count, G5);
  2547     gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
  2549   #ifdef _LP64
  2550     if (UseCompressedOops) {
  2551       generate_conjoint_int_copy_core(aligned);
  2552     } else {
  2553       generate_conjoint_long_copy_core(aligned);
  2555   #else
  2556     generate_conjoint_int_copy_core(aligned);
  2557   #endif
  2559     // O0 is used as temp register
  2560     gen_write_ref_array_post_barrier(G1, G5, O0);
  2562     // O3, O4 are used as temp registers
  2563     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
  2564     __ retl();
  2565     __ delayed()->mov(G0, O0); // return 0
  2566     return start;
  2570   // Helper for generating a dynamic type check.
  2571   // Smashes only the given temp registers.
  2572   void generate_type_check(Register sub_klass,
  2573                            Register super_check_offset,
  2574                            Register super_klass,
  2575                            Register temp,
  2576                            Label& L_success) {
  2577     assert_different_registers(sub_klass, super_check_offset, super_klass, temp);
  2579     BLOCK_COMMENT("type_check:");
  2581     Label L_miss, L_pop_to_miss;
  2583     assert_clean_int(super_check_offset, temp);
  2585     __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, noreg,
  2586                                      &L_success, &L_miss, NULL,
  2587                                      super_check_offset);
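           // The fast path tests the word at 'super_check_offset' inside
           // sub_klass (the primary supers array or the secondary-super cache);
           // only on a miss does the slow path below scan the secondary supers
           // list, which needs a saved frame for its temporaries.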
  2589     BLOCK_COMMENT("type_check_slow_path:");
  2590     __ save_frame(0);
  2591     __ check_klass_subtype_slow_path(sub_klass->after_save(),
  2592                                      super_klass->after_save(),
  2593                                      L0, L1, L2, L4,
  2594                                      NULL, &L_pop_to_miss);
  2595     __ ba(L_success);
  2596     __ delayed()->restore();
  2598     __ bind(L_pop_to_miss);
  2599     __ restore();
  2601     // Fall through on failure!
  2602     __ BIND(L_miss);
  2606   //  Generate stub for checked oop copy.
  2607   //
  2608   // Arguments for generated stub:
  2609   //      from:  O0
  2610   //      to:    O1
  2611   //      count: O2 treated as signed
  2612   //      ckoff: O3 (super_check_offset)
  2613   //      ckval: O4 (super_klass)
  2614   //      ret:   O0 zero for success; (-1^K) where K is partial transfer count
  2615   //
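         // For illustration: if 7 of 10 elements are copied before a type check
         // fails, the stub returns ~7 (i.e. -8) and the caller can recover the
         // partial transfer count as K = ~O0.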
  2616   address generate_checkcast_copy(const char *name, address *entry, bool dest_uninitialized = false) {
  2618     const Register O0_from   = O0;      // source array address
  2619     const Register O1_to     = O1;      // destination array address
  2620     const Register O2_count  = O2;      // elements count
  2621     const Register O3_ckoff  = O3;      // super_check_offset
  2622     const Register O4_ckval  = O4;      // super_klass
  2624     const Register O5_offset = O5;      // loop var, with stride wordSize
  2625     const Register G1_remain = G1;      // loop var, with stride -1
  2626     const Register G3_oop    = G3;      // actual oop copied
  2627     const Register G4_klass  = G4;      // oop._klass
  2628     const Register G5_super  = G5;      // oop._klass._primary_supers[ckval]
  2630     __ align(CodeEntryAlignment);
  2631     StubCodeMark mark(this, "StubRoutines", name);
  2632     address start = __ pc();
  2634 #ifdef ASSERT
  2635     // We sometimes save a frame (see generate_type_check below).
  2636     // If this will cause trouble, let's fail now instead of later.
  2637     __ save_frame(0);
  2638     __ restore();
  2639 #endif
  2641     assert_clean_int(O2_count, G1);     // Make sure 'count' is clean int.
  2643 #ifdef ASSERT
  2644     // caller guarantees that the arrays really are different
  2645     // otherwise, we would have to make conjoint checks
  2646     { Label L;
  2647       __ mov(O3, G1);           // spill: overlap test smashes O3
  2648       __ mov(O4, G4);           // spill: overlap test smashes O4
  2649       array_overlap_test(L, LogBytesPerHeapOop);
  2650       __ stop("checkcast_copy within a single array");
  2651       __ bind(L);
  2652       __ mov(G1, O3);
  2653       __ mov(G4, O4);
  2655 #endif //ASSERT
  2657     if (entry != NULL) {
  2658       *entry = __ pc();
  2659       // caller can pass a 64-bit byte count here (from generic stub)
  2660       BLOCK_COMMENT("Entry:");
  2662     gen_write_ref_array_pre_barrier(O1_to, O2_count, dest_uninitialized);
  2664     Label load_element, store_element, do_card_marks, fail, done;
  2665     __ addcc(O2_count, 0, G1_remain);   // initialize loop index, and test it
  2666     __ brx(Assembler::notZero, false, Assembler::pt, load_element);
  2667     __ delayed()->mov(G0, O5_offset);   // offset from start of arrays
  2669     // Empty array:  Nothing to do.
  2670     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
  2671     __ retl();
  2672     __ delayed()->set(0, O0);           // return 0 on (trivial) success
  2674     // ======== begin loop ========
  2675     // (Loop is rotated; its entry is load_element.)
  2676     // Loop variables:
  2677     //   (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays
  2678     //   (O2 = len; O2 != 0; O2--) --- number of oops *remaining*
  2679     //   G3, G4, G5 --- current oop, oop.klass, oop.klass.super
  2680     __ align(OptoLoopAlignment);
  2682     __ BIND(store_element);
  2683     __ deccc(G1_remain);                // decrement the count
  2684     __ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop
  2685     __ inc(O5_offset, heapOopSize);     // step to next offset
  2686     __ brx(Assembler::zero, true, Assembler::pt, do_card_marks);
  2687     __ delayed()->set(0, O0);           // return 0 on success
  2689     // ======== loop entry is here ========
  2690     __ BIND(load_element);
  2691     __ load_heap_oop(O0_from, O5_offset, G3_oop);  // load the oop
  2692     __ br_null_short(G3_oop, Assembler::pt, store_element);
  2694     __ load_klass(G3_oop, G4_klass); // query the object klass
  2696     generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super,
  2697                         // branch to this on success:
  2698                         store_element);
  2699     // ======== end loop ========
  2701     // It was a real error; we must depend on the caller to finish the job.
  2702     // Register G1 has number of *remaining* oops, O2 number of *total* oops.
  2703     // Emit GC store barriers for the oops we have copied (O2 minus G1),
  2704     // and report their number to the caller.
  2705     __ BIND(fail);
  2706     __ subcc(O2_count, G1_remain, O2_count);
  2707     __ brx(Assembler::zero, false, Assembler::pt, done);
  2708     __ delayed()->not1(O2_count, O0);   // report (-1^K) to caller
  2710     __ BIND(do_card_marks);
  2711     gen_write_ref_array_post_barrier(O1_to, O2_count, O3);   // store check on O1[0..O2]
  2713     __ BIND(done);
  2714     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
  2715     __ retl();
  2716     __ delayed()->nop();             // return value in O0
  2718     return start;
  2722   //  Generate 'unsafe' array copy stub
  2723   //  Though just as safe as the other stubs, it takes an unscaled
  2724   //  size_t argument instead of an element count.
  2725   //
  2726   // Arguments for generated stub:
  2727   //      from:  O0
  2728   //      to:    O1
  2729   //      count: O2 byte count, treated as ssize_t, can be zero
  2730   //
  2731   // Examines the alignment of the operands and dispatches
  2732   // to a long, int, short, or byte copy loop.
  2733   //
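         // For illustration: from = 0x1008, to = 0x2010, count = 24 leaves the
         // low three bits of (from | to | count) clear, so the jlong entry is
         // taken with the count scaled to 24 >> 3 = 3 elements.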
  2734   address generate_unsafe_copy(const char* name,
  2735                                address byte_copy_entry,
  2736                                address short_copy_entry,
  2737                                address int_copy_entry,
  2738                                address long_copy_entry) {
  2740     const Register O0_from   = O0;      // source array address
  2741     const Register O1_to     = O1;      // destination array address
  2742     const Register O2_count  = O2;      // elements count
  2744     const Register G1_bits   = G1;      // test copy of low bits
  2746     __ align(CodeEntryAlignment);
  2747     StubCodeMark mark(this, "StubRoutines", name);
  2748     address start = __ pc();
  2750     // bump this on entry, not on exit:
  2751     inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, G1, G3);
  2753     __ or3(O0_from, O1_to, G1_bits);
  2754     __ or3(O2_count,       G1_bits, G1_bits);
  2756     __ btst(BytesPerLong-1, G1_bits);
  2757     __ br(Assembler::zero, true, Assembler::pt,
  2758           long_copy_entry, relocInfo::runtime_call_type);
  2759     // scale the count on the way out:
  2760     __ delayed()->srax(O2_count, LogBytesPerLong, O2_count);
  2762     __ btst(BytesPerInt-1, G1_bits);
  2763     __ br(Assembler::zero, true, Assembler::pt,
  2764           int_copy_entry, relocInfo::runtime_call_type);
  2765     // scale the count on the way out:
  2766     __ delayed()->srax(O2_count, LogBytesPerInt, O2_count);
  2768     __ btst(BytesPerShort-1, G1_bits);
  2769     __ br(Assembler::zero, true, Assembler::pt,
  2770           short_copy_entry, relocInfo::runtime_call_type);
  2771     // scale the count on the way out:
  2772     __ delayed()->srax(O2_count, LogBytesPerShort, O2_count);
  2774     __ br(Assembler::always, false, Assembler::pt,
  2775           byte_copy_entry, relocInfo::runtime_call_type);
  2776     __ delayed()->nop();
  2778     return start;
  2782   // Perform range checks on the proposed arraycopy.
  2783   // Kills the two temps, but nothing else.
  2784   // Also, clean the sign bits of src_pos and dst_pos.
  2785   void arraycopy_range_checks(Register src,     // source array oop (O0)
  2786                               Register src_pos, // source position (O1)
  2787                               Register dst,     // destination array oop (O2)
  2788                               Register dst_pos, // destination position (O3)
  2789                               Register length,  // length of copy (O4)
  2790                               Register temp1, Register temp2,
  2791                               Label& L_failed) {
  2792     BLOCK_COMMENT("arraycopy_range_checks:");
  2794     //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
  2796     const Register array_length = temp1;  // scratch
  2797     const Register end_pos      = temp2;  // scratch
  2799     // Note:  This next instruction may be in the delay slot of a branch:
  2800     __ add(length, src_pos, end_pos);  // src_pos + length
  2801     __ lduw(src, arrayOopDesc::length_offset_in_bytes(), array_length);
  2802     __ cmp(end_pos, array_length);
  2803     __ br(Assembler::greater, false, Assembler::pn, L_failed);
  2805     //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
  2806     __ delayed()->add(length, dst_pos, end_pos); // dst_pos + length
  2807     __ lduw(dst, arrayOopDesc::length_offset_in_bytes(), array_length);
  2808     __ cmp(end_pos, array_length);
  2809     __ br(Assembler::greater, false, Assembler::pn, L_failed);
  2811     // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
  2812     // Move with sign extension can be used since they are positive.
  2813     __ delayed()->signx(src_pos, src_pos);
  2814     __ signx(dst_pos, dst_pos);
  2816     BLOCK_COMMENT("arraycopy_range_checks done");
  2820   //
  2821   //  Generate generic array copy stubs
  2822   //
  2823   //  Input:
  2824   //    O0    -  src oop
  2825   //    O1    -  src_pos
  2826   //    O2    -  dst oop
  2827   //    O3    -  dst_pos
  2828   //    O4    -  element count
  2829   //
  2830   //  Output:
  2831   //    O0 ==  0  -  success
  2832   //    O0 == -1  -  need to call System.arraycopy
  2833   //
  2834   address generate_generic_copy(const char *name,
  2835                                 address entry_jbyte_arraycopy,
  2836                                 address entry_jshort_arraycopy,
  2837                                 address entry_jint_arraycopy,
  2838                                 address entry_oop_arraycopy,
  2839                                 address entry_jlong_arraycopy,
  2840                                 address entry_checkcast_arraycopy) {
  2841     Label L_failed, L_objArray;
  2843     // Input registers
  2844     const Register src      = O0;  // source array oop
  2845     const Register src_pos  = O1;  // source position
  2846     const Register dst      = O2;  // destination array oop
  2847     const Register dst_pos  = O3;  // destination position
  2848     const Register length   = O4;  // elements count
  2850     // registers used as temp
  2851     const Register G3_src_klass = G3; // source array klass
  2852     const Register G4_dst_klass = G4; // destination array klass
  2853     const Register G5_lh        = G5; // layout helper
  2854     const Register O5_temp      = O5;
  2856     __ align(CodeEntryAlignment);
  2857     StubCodeMark mark(this, "StubRoutines", name);
  2858     address start = __ pc();
  2860     // bump this on entry, not on exit:
  2861     inc_counter_np(SharedRuntime::_generic_array_copy_ctr, G1, G3);
  2863     // In principle, the int arguments could be dirty.
  2864     //assert_clean_int(src_pos, G1);
  2865     //assert_clean_int(dst_pos, G1);
  2866     //assert_clean_int(length, G1);
  2868     //-----------------------------------------------------------------------
  2869     // Assembler stubs will be used for this call to arraycopy
  2870     // if the following conditions are met:
  2871     //
  2872     // (1) src and dst must not be null.
  2873     // (2) src_pos must not be negative.
  2874     // (3) dst_pos must not be negative.
  2875     // (4) length  must not be negative.
  2876     // (5) src klass and dst klass should be the same and not NULL.
  2877     // (6) src and dst should be arrays.
  2878     // (7) src_pos + length must not exceed length of src.
  2879     // (8) dst_pos + length must not exceed length of dst.
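           // A simplified C-level sketch of checks (1)-(4); returning -1 bails
           // out to System.arraycopy. Checks (5)-(8) are refined below, since
           // objArrays with different klasses may still use the checkcast path:
           //
           //   if (src == NULL || src_pos < 0) return -1;
           //   if (dst == NULL || dst_pos < 0) return -1;
           //   if (length < 0)                 return -1;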
  2880     BLOCK_COMMENT("arraycopy initial argument checks");
  2882     //  if (src == NULL) return -1;
  2883     __ br_null(src, false, Assembler::pn, L_failed);
  2885     //  if (src_pos < 0) return -1;
  2886     __ delayed()->tst(src_pos);
  2887     __ br(Assembler::negative, false, Assembler::pn, L_failed);
  2888     __ delayed()->nop();
  2890     //  if (dst == NULL) return -1;
  2891     __ br_null(dst, false, Assembler::pn, L_failed);
  2893     //  if (dst_pos < 0) return -1;
  2894     __ delayed()->tst(dst_pos);
  2895     __ br(Assembler::negative, false, Assembler::pn, L_failed);
  2897     //  if (length < 0) return -1;
  2898     __ delayed()->tst(length);
  2899     __ br(Assembler::negative, false, Assembler::pn, L_failed);
  2901     BLOCK_COMMENT("arraycopy argument klass checks");
  2902     //  get src->klass()
  2903     if (UseCompressedKlassPointers) {
  2904       __ delayed()->nop(); // ??? not good
  2905       __ load_klass(src, G3_src_klass);
  2906     } else {
  2907       __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass);
  2908     }
  2910 #ifdef ASSERT
  2911     //  assert(src->klass() != NULL);
  2912     BLOCK_COMMENT("assert klasses not null");
  2913     { Label L_a, L_b;
  2914       __ br_notnull_short(G3_src_klass, Assembler::pt, L_b); // it is broken if klass is NULL
  2915       __ bind(L_a);
  2916       __ stop("broken null klass");
  2917       __ bind(L_b);
  2918       __ load_klass(dst, G4_dst_klass);
  2919       __ br_null(G4_dst_klass, false, Assembler::pn, L_a); // this would be broken also
  2920       __ delayed()->mov(G0, G4_dst_klass);      // scribble the temp
  2921       BLOCK_COMMENT("assert done");
  2922     }
  2923 #endif
  2925     // Load layout helper
  2926     //
  2927     //  |array_tag|     | header_size | element_type |     |log2_element_size|
  2928     // 32        30    24            16              8     2                 0
  2929     //
  2930     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
  2931     //
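           // A decoding sketch in terms of the Klass constants used below:
           //
           //   juint tag      = (juint)lh >> Klass::_lh_array_tag_shift;  // 0x3 typeArray, 0x2 objArray
           //   int   hsize    = (lh >> Klass::_lh_header_size_shift) & Klass::_lh_header_size_mask;
           //   int   log2_esz = lh & Klass::_lh_log2_element_size_mask;
           //
           // For non-arrays lh is a positive instance size, which is why the
           // comparison against _lh_neutral_value below identifies arrays.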
  2933     int lh_offset = in_bytes(Klass::layout_helper_offset());
  2935     // Load the 32-bit signed value; br() instructions can then test icc on it.
  2936     __ lduw(G3_src_klass, lh_offset, G5_lh);
  2938     if (UseCompressedKlassPointers) {
  2939       __ load_klass(dst, G4_dst_klass);
  2940     }
  2941     // Handle objArrays completely differently...
  2942     juint objArray_lh = Klass::array_layout_helper(T_OBJECT);
  2943     __ set(objArray_lh, O5_temp);
  2944     __ cmp(G5_lh,       O5_temp);
  2945     __ br(Assembler::equal, false, Assembler::pt, L_objArray);
  2946     if (UseCompressedKlassPointers) {
  2947       __ delayed()->nop();
  2948     } else {
  2949       __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
  2950     }
  2952     //  if (src->klass() != dst->klass()) return -1;
  2953     __ cmp_and_brx_short(G3_src_klass, G4_dst_klass, Assembler::notEqual, Assembler::pn, L_failed);
  2955     //  if (!src->is_Array()) return -1;
  2956     __ cmp(G5_lh, Klass::_lh_neutral_value); // < 0
  2957     __ br(Assembler::greaterEqual, false, Assembler::pn, L_failed);
  2959     // At this point, it is known to be a typeArray (array_tag 0x3).
  2960 #ifdef ASSERT
  2961     __ delayed()->nop();
  2962     { Label L;
  2963       jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
  2964       __ set(lh_prim_tag_in_place, O5_temp);
  2965       __ cmp(G5_lh,                O5_temp);
  2966       __ br(Assembler::greaterEqual, false, Assembler::pt, L);
  2967       __ delayed()->nop();
  2968       __ stop("must be a primitive array");
  2969       __ bind(L);
  2970     }
  2971 #else
  2972     __ delayed();                               // match next insn to prev branch
  2973 #endif
  2975     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
  2976                            O5_temp, G4_dst_klass, L_failed);
  2978     // TypeArrayKlass
  2979     //
  2980     // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
  2981     // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
  2982     //
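           // Worked example (a sketch): for a jint array, hsize decodes to
           // arrayOopDesc::base_offset_in_bytes(T_INT) and log2elemsize is
           // LogBytesPerInt, so element i lives at src + hsize + (i << LogBytesPerInt).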
  2984     const Register G4_offset = G4_dst_klass;    // array offset
  2985     const Register G3_elsize = G3_src_klass;    // log2 element size
  2987     __ srl(G5_lh, Klass::_lh_header_size_shift, G4_offset);
  2988     __ and3(G4_offset, Klass::_lh_header_size_mask, G4_offset); // array_offset
  2989     __ add(src, G4_offset, src);       // src array offset
  2990     __ add(dst, G4_offset, dst);       // dst array offset
  2991     __ and3(G5_lh, Klass::_lh_log2_element_size_mask, G3_elsize); // log2 element size
  2993     // The following registers must be set before the jump to the corresponding stub:
  2994     const Register from     = O0;  // source array address
  2995     const Register to       = O1;  // destination array address
  2996     const Register count    = O2;  // elements count
  2998     // 'from', 'to', 'count' registers should be set in this order
  2999     // since they are the same as 'src', 'src_pos', 'dst'.
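           // (from aliases src (O0), to aliases src_pos (O1), and count aliases
           //  dst (O2); each is written only after the register it reuses has
           //  been consumed.)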
  3001     BLOCK_COMMENT("scale indexes to element size");
  3002     __ sll_ptr(src_pos, G3_elsize, src_pos);
  3003     __ sll_ptr(dst_pos, G3_elsize, dst_pos);
  3004     __ add(src, src_pos, from);       // src_addr
  3005     __ add(dst, dst_pos, to);         // dst_addr
  3007     BLOCK_COMMENT("choose copy loop based on element size");
  3008     __ cmp(G3_elsize, 0);
  3009     __ br(Assembler::equal, true, Assembler::pt, entry_jbyte_arraycopy);
  3010     __ delayed()->signx(length, count); // length
  3012     __ cmp(G3_elsize, LogBytesPerShort);
  3013     __ br(Assembler::equal, true, Assembler::pt, entry_jshort_arraycopy);
  3014     __ delayed()->signx(length, count); // length
  3016     __ cmp(G3_elsize, LogBytesPerInt);
  3017     __ br(Assembler::equal, true, Assembler::pt, entry_jint_arraycopy);
  3018     __ delayed()->signx(length, count); // length
  3019 #ifdef ASSERT
  3020     { Label L;
  3021       __ cmp_and_br_short(G3_elsize, LogBytesPerLong, Assembler::equal, Assembler::pt, L);
  3022       __ stop("must be long copy, but elsize is wrong");
  3023       __ bind(L);
  3024     }
  3025 #endif
  3026     __ br(Assembler::always, false, Assembler::pt, entry_jlong_arraycopy);
  3027     __ delayed()->signx(length, count); // length
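           // Dispatch sketch: the log2 element size selects the primitive stub.
           //
           //   switch (elsize) {
           //     case 0:                goto entry_jbyte_arraycopy;
           //     case LogBytesPerShort: goto entry_jshort_arraycopy;
           //     case LogBytesPerInt:   goto entry_jint_arraycopy;
           //     default:               goto entry_jlong_arraycopy;  // asserted: LogBytesPerLong
           //   }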
  3029     // ObjArrayKlass
  3030   __ BIND(L_objArray);
  3031     // live at this point:  G3_src_klass, G4_dst_klass, src[_pos], dst[_pos], length
  3033     Label L_plain_copy, L_checkcast_copy;
  3034     //  test array classes for subtyping
  3035     __ cmp(G3_src_klass, G4_dst_klass);         // usual case is exact equality
  3036     __ brx(Assembler::notEqual, true, Assembler::pn, L_checkcast_copy);
  3037     __ delayed()->lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted from below
  3039     // Identically typed arrays can be copied without element-wise checks.
  3040     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
  3041                            O5_temp, G5_lh, L_failed);
  3043     __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
  3044     __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
  3045     __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
  3046     __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
  3047     __ add(src, src_pos, from);       // src_addr
  3048     __ add(dst, dst_pos, to);         // dst_addr
  3049   __ BIND(L_plain_copy);
  3050     __ br(Assembler::always, false, Assembler::pt, entry_oop_arraycopy);
  3051     __ delayed()->signx(length, count); // length
  3053   __ BIND(L_checkcast_copy);
  3054     // live at this point:  G3_src_klass, G4_dst_klass
  3055     {
  3056       // Before looking at dst.length, make sure dst is also an objArray.
  3057       // lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted to delay slot
  3058       __ cmp(G5_lh,                    O5_temp);
  3059       __ br(Assembler::notEqual, false, Assembler::pn, L_failed);
  3061       // It is safe to examine both src.length and dst.length.
  3062       __ delayed();                             // match next insn to prev branch
  3063       arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
  3064                              O5_temp, G5_lh, L_failed);
  3066       // Marshal the base address arguments now, freeing registers.
  3067       __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
  3068       __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
  3069       __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
  3070       __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
  3071       __ add(src, src_pos, from);               // src_addr
  3072       __ add(dst, dst_pos, to);                 // dst_addr
  3073       __ signx(length, count);                  // length (reloaded)
  3075       Register sco_temp = O3;                   // this register is free now
  3076       assert_different_registers(from, to, count, sco_temp,
  3077                                  G4_dst_klass, G3_src_klass);
  3079       // Generate the type check.
  3080       int sco_offset = in_bytes(Klass::super_check_offset_offset());
  3081       __ lduw(G4_dst_klass, sco_offset, sco_temp);
  3082       generate_type_check(G3_src_klass, sco_temp, G4_dst_klass,
  3083                           O5_temp, L_plain_copy);
  3085       // Fetch destination element klass from the ObjArrayKlass header.
  3086       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
  3088       // the checkcast_copy loop needs two extra arguments:
  3089       __ ld_ptr(G4_dst_klass, ek_offset, O4);   // dest elem klass
  3090       // lduw(O4, sco_offset, O3);              // sco of elem klass
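             // In C terms (a sketch): the two extra arguments are
             //   O4 = dst->klass()->element_klass()
             //   O3 = element_klass->super_check_offset()
             // letting checkcast_arraycopy type-check each element as it copies.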
  3092       __ br(Assembler::always, false, Assembler::pt, entry_checkcast_arraycopy);
  3093       __ delayed()->lduw(O4, sco_offset, O3);
  3094     }
  3096   __ BIND(L_failed);
  3097     __ retl();
  3098     __ delayed()->sub(G0, 1, O0); // return -1
  3099     return start;
  3100   }
  3102   //
  3103   //  Generate stub for heap zeroing.
  3104   //  "to" address is aligned to jlong (8 bytes).
  3105   //
  3106   // Arguments for generated stub:
  3107   //      to:    O0
  3108   //      count: O1 treated as signed (count of HeapWords)
  3109   //             count could be 0
  3110   //
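         // Semantically a sketch of:  memset(to, 0, count * HeapWordSize);
         // BIS (Block Initializing Store) is used so the zeroing need not read
         // cache lines that are about to be overwritten in full.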
  3111   address generate_zero_aligned_words(const char* name) {
  3112     __ align(CodeEntryAlignment);
  3113     StubCodeMark mark(this, "StubRoutines", name);
  3114     address start = __ pc();
  3116     const Register to    = O0;   // destination address
  3117     const Register count = O1;   // HeapWords count
  3118     const Register temp  = O2;   // scratch
  3120     Label Ldone;
  3121     __ sllx(count, LogHeapWordSize, count); // to bytes count
  3122     // Use BIS for zeroing
  3123     __ bis_zeroing(to, count, temp, Ldone);
  3124     __ bind(Ldone);
  3125     __ retl();
  3126     __ delayed()->nop();
  3127     return start;
  3128   }
  3130   void generate_arraycopy_stubs() {
  3131     address entry;
  3132     address entry_jbyte_arraycopy;
  3133     address entry_jshort_arraycopy;
  3134     address entry_jint_arraycopy;
  3135     address entry_oop_arraycopy;
  3136     address entry_jlong_arraycopy;
  3137     address entry_checkcast_arraycopy;
  3139     //*** jbyte
  3140     // Always need aligned and unaligned versions
  3141     StubRoutines::_jbyte_disjoint_arraycopy         = generate_disjoint_byte_copy(false, &entry,
  3142                                                                                   "jbyte_disjoint_arraycopy");
  3143     StubRoutines::_jbyte_arraycopy                  = generate_conjoint_byte_copy(false, entry,
  3144                                                                                   &entry_jbyte_arraycopy,
  3145                                                                                   "jbyte_arraycopy");
  3146     StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry,
  3147                                                                                   "arrayof_jbyte_disjoint_arraycopy");
  3148     StubRoutines::_arrayof_jbyte_arraycopy          = generate_conjoint_byte_copy(true, entry, NULL,
  3149                                                                                   "arrayof_jbyte_arraycopy");
  3151     //*** jshort
  3152     // Always need aligned and unaligned versions
  3153     StubRoutines::_jshort_disjoint_arraycopy         = generate_disjoint_short_copy(false, &entry,
  3154                                                                                     "jshort_disjoint_arraycopy");
  3155     StubRoutines::_jshort_arraycopy                  = generate_conjoint_short_copy(false, entry,
  3156                                                                                     &entry_jshort_arraycopy,
  3157                                                                                     "jshort_arraycopy");
  3158     StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry,
  3159                                                                                     "arrayof_jshort_disjoint_arraycopy");
  3160     StubRoutines::_arrayof_jshort_arraycopy          = generate_conjoint_short_copy(true, entry, NULL,
  3161                                                                                     "arrayof_jshort_arraycopy");
  3163     //*** jint
  3164     // Aligned versions
  3165     StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry,
  3166                                                                                 "arrayof_jint_disjoint_arraycopy");
  3167     StubRoutines::_arrayof_jint_arraycopy          = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy,
  3168                                                                                 "arrayof_jint_arraycopy");
  3169 #ifdef _LP64
  3170     // In 64 bit we need both aligned and unaligned versions of jint arraycopy.
  3171     // entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it).
  3172     StubRoutines::_jint_disjoint_arraycopy         = generate_disjoint_int_copy(false, &entry,
  3173                                                                                 "jint_disjoint_arraycopy");
  3174     StubRoutines::_jint_arraycopy                  = generate_conjoint_int_copy(false, entry,
  3175                                                                                 &entry_jint_arraycopy,
  3176                                                                                 "jint_arraycopy");
  3177 #else
  3178     // In 32 bit jints are always HeapWordSize aligned, so always use the aligned version
  3179     // (in fact in 32bit we always have a pre-loop part even in the aligned version,
  3180     //  because it uses 64-bit loads/stores, so the aligned flag is actually ignored).
  3181     StubRoutines::_jint_disjoint_arraycopy = StubRoutines::_arrayof_jint_disjoint_arraycopy;
  3182     StubRoutines::_jint_arraycopy          = StubRoutines::_arrayof_jint_arraycopy;
  3183 #endif
  3186     //*** jlong
  3187     // It is always aligned
  3188     StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, &entry,
  3189                                                                                   "arrayof_jlong_disjoint_arraycopy");
  3190     StubRoutines::_arrayof_jlong_arraycopy          = generate_conjoint_long_copy(true, entry, &entry_jlong_arraycopy,
  3191                                                                                   "arrayof_jlong_arraycopy");
  3192     StubRoutines::_jlong_disjoint_arraycopy         = StubRoutines::_arrayof_jlong_disjoint_arraycopy;
  3193     StubRoutines::_jlong_arraycopy                  = StubRoutines::_arrayof_jlong_arraycopy;
  3196     //*** oops
  3197     // Aligned versions
  3198     StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, &entry,
  3199                                                                                       "arrayof_oop_disjoint_arraycopy");
  3200     StubRoutines::_arrayof_oop_arraycopy                 = generate_conjoint_oop_copy(true, entry, &entry_oop_arraycopy,
  3201                                                                                       "arrayof_oop_arraycopy");
  3202     // Aligned versions without pre-barriers
  3203     StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, &entry,
  3204                                                                                       "arrayof_oop_disjoint_arraycopy_uninit",
  3205                                                                                       /*dest_uninitialized*/true);
  3206     StubRoutines::_arrayof_oop_arraycopy_uninit          = generate_conjoint_oop_copy(true, entry, NULL,
  3207                                                                                       "arrayof_oop_arraycopy_uninit",
  3208                                                                                       /*dest_uninitialized*/true);
  3209 #ifdef _LP64
  3210     if (UseCompressedOops) {
  3211       // With compressed oops we need unaligned versions, notice that we overwrite entry_oop_arraycopy.
  3212       StubRoutines::_oop_disjoint_arraycopy            = generate_disjoint_oop_copy(false, &entry,
  3213                                                                                     "oop_disjoint_arraycopy");
  3214       StubRoutines::_oop_arraycopy                     = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy,
  3215                                                                                     "oop_arraycopy");
  3216       // Unaligned versions without pre-barriers
  3217       StubRoutines::_oop_disjoint_arraycopy_uninit     = generate_disjoint_oop_copy(false, &entry,
  3218                                                                                     "oop_disjoint_arraycopy_uninit",
  3219                                                                                     /*dest_uninitialized*/true);
  3220       StubRoutines::_oop_arraycopy_uninit              = generate_conjoint_oop_copy(false, entry, NULL,
  3221                                                                                     "oop_arraycopy_uninit",
  3222                                                                                     /*dest_uninitialized*/true);
  3223     } else
  3224 #endif
  3226       // oop arraycopy is always aligned on 32bit and 64bit without compressed oops
  3227       StubRoutines::_oop_disjoint_arraycopy            = StubRoutines::_arrayof_oop_disjoint_arraycopy;
  3228       StubRoutines::_oop_arraycopy                     = StubRoutines::_arrayof_oop_arraycopy;
  3229       StubRoutines::_oop_disjoint_arraycopy_uninit     = StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit;
  3230       StubRoutines::_oop_arraycopy_uninit              = StubRoutines::_arrayof_oop_arraycopy_uninit;
  3233     StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
  3234     StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
  3235                                                                         /*dest_uninitialized*/true);
  3237     StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy",
  3238                                                               entry_jbyte_arraycopy,
  3239                                                               entry_jshort_arraycopy,
  3240                                                               entry_jint_arraycopy,
  3241                                                               entry_jlong_arraycopy);
  3242     StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy",
  3243                                                                entry_jbyte_arraycopy,
  3244                                                                entry_jshort_arraycopy,
  3245                                                                entry_jint_arraycopy,
  3246                                                                entry_oop_arraycopy,
  3247                                                                entry_jlong_arraycopy,
  3248                                                                entry_checkcast_arraycopy);
  3250     StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
  3251     StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
  3252     StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
  3253     StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
  3254     StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
  3255     StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
  3257     if (UseBlockZeroing) {
  3258       StubRoutines::_zero_aligned_words = generate_zero_aligned_words("zero_aligned_words");
  3259     }
  3260   }
  3262   void generate_initial() {
  3263     // Generates all stubs and initializes the entry points
  3265     //------------------------------------------------------------------------------------------------------------------------
  3266     // entry points that exist in all platforms
  3267     // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
  3268     //       the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
  3269     StubRoutines::_forward_exception_entry                 = generate_forward_exception();
  3271     StubRoutines::_call_stub_entry                         = generate_call_stub(StubRoutines::_call_stub_return_address);
  3272     StubRoutines::_catch_exception_entry                   = generate_catch_exception();
  3274     //------------------------------------------------------------------------------------------------------------------------
  3275     // entry points that are platform specific
  3276     StubRoutines::Sparc::_test_stop_entry                  = generate_test_stop();
  3278     StubRoutines::Sparc::_stop_subroutine_entry            = generate_stop_subroutine();
  3279     StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows();
  3281 #if !defined(COMPILER2) && !defined(_LP64)
  3282     StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
  3283     StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
  3284     StubRoutines::_atomic_add_entry          = generate_atomic_add();
  3285     StubRoutines::_atomic_xchg_ptr_entry     = StubRoutines::_atomic_xchg_entry;
  3286     StubRoutines::_atomic_cmpxchg_ptr_entry  = StubRoutines::_atomic_cmpxchg_entry;
  3287     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
  3288     StubRoutines::_atomic_add_ptr_entry      = StubRoutines::_atomic_add_entry;
  3289 #endif  // COMPILER2 !=> _LP64
  3291     // Build this early so it's available for the interpreter.
  3292     StubRoutines::_throw_StackOverflowError_entry          = generate_throw_exception("StackOverflowError throw_exception",           CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
  3293   }
  3296   void generate_all() {
  3297     // Generates all stubs and initializes the entry points
  3299     // Generate partial_subtype_check first here since its code depends on
  3300     // UseZeroBaseCompressedOops which is defined after heap initialization.
  3301     StubRoutines::Sparc::_partial_subtype_check                = generate_partial_subtype_check();
  3302     // These entry points require SharedInfo::stack0 to be set up in non-core builds
  3303     StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
  3304     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
  3305     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
  3307     StubRoutines::_handler_for_unsafe_access_entry =
  3308       generate_handler_for_unsafe_access();
  3310     // support for verify_oop (must happen after universe_init)
  3311     StubRoutines::_verify_oop_subroutine_entry     = generate_verify_oop_subroutine();
  3313     // arraycopy stubs used by compilers
  3314     generate_arraycopy_stubs();
  3316     // Don't initialize the platform math functions since sparc
  3317     // doesn't have intrinsics for these operations.
  3318   }
  3321  public:
  3322   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
  3323     // replace the standard masm with a special one:
  3324     _masm = new MacroAssembler(code);
  3326     _stub_count = !all ? 0x100 : 0x200;
  3327     if (all) {
  3328       generate_all();
  3329     } else {
  3330       generate_initial();
  3331     }
  3333     // make sure this stub is available for all local calls
  3334     if (_atomic_add_stub.is_unbound()) {
  3335       // generate a second time, if necessary
  3336       (void) generate_atomic_add();
  3337     }
  3338   }
  3341  private:
  3342   int _stub_count;
  3343   void stub_prolog(StubCodeDesc* cdesc) {
  3344     # ifdef ASSERT
  3345       // put extra information in the stub code, to make it more readable
  3346 #ifdef _LP64
  3347 // Write the high part of the address
  3348 // [RGV] Check if there is a dependency on the size of this prolog
  3349       __ emit_data((intptr_t)cdesc >> 32,    relocInfo::none);
  3350 #endif
  3351       __ emit_data((intptr_t)cdesc,    relocInfo::none);
  3352       __ emit_data(++_stub_count, relocInfo::none);
  3353     # endif
  3354     align(true);
  3355   }
  3357   void align(bool at_header = false) {
  3358     // %%%%% move this constant somewhere else
  3359     // UltraSPARC cache line size is 8 instructions:
  3360     const unsigned int icache_line_size = 32;
  3361     const unsigned int icache_half_line_size = 16;
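           // (8 instructions x 4 bytes per SPARC instruction = 32 bytes.)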
  3363     if (at_header) {
  3364       while ((intptr_t)(__ pc()) % icache_line_size != 0) {
  3365         __ emit_data(0, relocInfo::none);
  3366       }
  3367     } else {
  3368       while ((intptr_t)(__ pc()) % icache_half_line_size != 0) {
  3369         __ nop();
  3370       }
  3371     }
  3372   }
  3374 }; // end class declaration
  3376 void StubGenerator_generate(CodeBuffer* code, bool all) {
  3377   StubGenerator g(code, all);
  3378 }
