Tue, 22 Feb 2011 15:25:02 -0800
7020521: arraycopy stubs place prebarriers incorrectly
Summary: Rearranged the pre-barrier placement in arraycopy stubs so that they are properly called in case of chained calls. Also refactored the code a little bit so that it looks uniform across the platforms and is more readable.
Reviewed-by: never, kvn
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/instanceOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/top.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp.

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Note:  The register L7 is used as L7_thread_cache, and may not be used
//        any other way within this module.

static const Register& Lstub_temp = L2;
// -------------------------------------------------------------------------------------------------------------------------
// Stub Code definitions

static address handle_unsafe_access() {
  JavaThread* thread = JavaThread::current();
  address pc  = thread->saved_exception_pc();
  address npc = thread->saved_exception_npc();
  // pc is the instruction which we must emulate
  // doing a no-op is fine: return garbage from the load

  // request an async exception
  thread->set_pending_unsafe_access_error();

  // return address of next instruction to execute
  return npc;
}
class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(a,b,c) (0)
#else
#define inc_counter_np(counter, t1, t2) \
  BLOCK_COMMENT("inc_counter " #counter); \
  __ inc_counter(&counter, t1, t2);
#endif
  //----------------------------------------------------------------------------------------------------
  // Call stubs are used to call Java from C

  address generate_call_stub(address& return_pc) {
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // Incoming arguments:
    //
    // o0         : call wrapper address
    // o1         : result (address)
    // o2         : result type
    // o3         : method
    // o4         : (interpreter) entry point
    // o5         : parameters (address)
    // [sp + 0x5c]: parameter size (in words)
    // [sp + 0x60]: thread
    //
    // +---------------+ <--- sp + 0
    // |               |
    // . reg save area .
    // |               |
    // +---------------+ <--- sp + 0x40
    // |               |
    // . extra 7 slots .
    // |               |
    // +---------------+ <--- sp + 0x5c
    // |  param. size  |
    // +---------------+ <--- sp + 0x60
    // |    thread     |
    // +---------------+
    // |               |

    // note: if the link argument position changes, adjust
    //       the code in frame::entry_frame_call_wrapper()
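
    // For reference, a sketch of the C-side view of this stub, assuming it
    // matches the CallStub typedef in stubRoutines.hpp of this era (that
    // header is authoritative for the exact spelling):
    //
    //   typedef void (*CallStub)(address   link,           // O0
    //                            intptr_t* result,         // O1
    //                            BasicType result_type,    // O2
    //                            methodOopDesc* method,    // O3
    //                            address   entry_point,    // O4
    //                            intptr_t* parameters,     // O5
    //                            int       size_of_parameters,
    //                            TRAPS);                   // thread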
    const Argument link           = Argument(0, false); // used only for GC
    const Argument result         = Argument(1, false);
    const Argument result_type    = Argument(2, false);
    const Argument method         = Argument(3, false);
    const Argument entry_point    = Argument(4, false);
    const Argument parameters     = Argument(5, false);
    const Argument parameter_size = Argument(6, false);
    const Argument thread         = Argument(7, false);

    // setup thread register
    __ ld_ptr(thread.as_address(), G2_thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    { const Register t = G3_scratch;
      Label L;
      __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), t);
      __ br_null(t, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // create activation frame & allocate space for parameters
    { const Register t = G3_scratch;
      __ ld_ptr(parameter_size.as_address(), t);            // get parameter size (in words)
      __ add(t, frame::memory_parameter_word_sp_offset, t); // add space for save area (in words)
      __ round_to(t, WordsPerLong);                         // make sure it is a multiple of 2 (in words)
      __ sll(t, Interpreter::logStackElementSize, t);       // compute number of bytes
      __ neg(t);                                            // negate so it can be used with save
      __ save(SP, t, SP);                                   // setup new frame
    }
    // +---------------+ <--- sp + 0
    // |               |
    // . reg save area .
    // |               |
    // +---------------+ <--- sp + 0x40
    // |               |
    // . extra 7 slots .
    // |               |
    // +---------------+ <--- sp + 0x5c
    // |  empty slot   | (only if parameter size is even)
    // +---------------+
    // |               |
    // .  parameters   .
    // |               |
    // +---------------+ <--- fp + 0
    // |               |
    // . reg save area .
    // |               |
    // +---------------+ <--- fp + 0x40
    // |               |
    // . extra 7 slots .
    // |               |
    // +---------------+ <--- fp + 0x5c
    // |  param. size  |
    // +---------------+ <--- fp + 0x60
    // |    thread     |
    // +---------------+
    // |               |

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    { const Register src = parameters.as_in().as_register();
      const Register dst = Lentry_args;
      const Register tmp = G3_scratch;
      const Register cnt = G4_scratch;

      // test if any parameters & setup of Lentry_args
      Label exit;
      __ ld_ptr(parameter_size.as_in().as_address(), cnt); // parameter counter
      __ add( FP, STACK_BIAS, dst );
      __ tst(cnt);
      __ br(Assembler::zero, false, Assembler::pn, exit);
      __ delayed()->sub(dst, BytesPerWord, dst);           // setup Lentry_args

      // copy parameters if any
      Label loop;
      __ BIND(loop);
      // Store parameter value
      __ ld_ptr(src, 0, tmp);
      __ add(src, BytesPerWord, src);
      __ st_ptr(tmp, dst, 0);
      __ deccc(cnt);
      __ br(Assembler::greater, false, Assembler::pt, loop);
      __ delayed()->sub(dst, Interpreter::stackElementSize, dst);

      // done
      __ BIND(exit);
    }
    // setup parameters, method & call Java function
#ifdef ASSERT
    // layout_activation_impl checks its notion of saved SP against
    // this register, so if this changes update it as well.
    const Register saved_SP = Lscratch;
    __ mov(SP, saved_SP);                               // keep track of SP before call
#endif

    // setup parameters
    const Register t = G3_scratch;
    __ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words)
    __ sll(t, Interpreter::logStackElementSize, t);    // compute number of bytes
    __ sub(FP, t, Gargs);                              // setup parameter pointer
#ifdef _LP64
    __ add( Gargs, STACK_BIAS, Gargs );                // Account for LP64 stack bias
#endif
    __ mov(SP, O5_savedSP);

    // do the call
    //
    // the following registers must be set up:
    //
    // G2_thread
    // G5_method
    // Gargs
    BLOCK_COMMENT("call Java function");
    __ jmpl(entry_point.as_in().as_register(), G0, O7);
    __ delayed()->mov(method.as_in().as_register(), G5_method);   // setup method

    BLOCK_COMMENT("call_stub_return_address:");
    return_pc = __ pc();

    // The callee, if it wasn't interpreted, can return with SP changed so
    // we can no longer assert that SP is unchanged.

    // store result depending on type
    // (everything that is not T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE
    //  is treated as T_INT)
    { const Register addr = result     .as_in().as_register();
      const Register type = result_type.as_in().as_register();
      Label is_long, is_float, is_double, is_object, exit;
      __            cmp(type, T_OBJECT);  __ br(Assembler::equal, false, Assembler::pn, is_object);
      __ delayed()->cmp(type, T_FLOAT);   __ br(Assembler::equal, false, Assembler::pn, is_float);
      __ delayed()->cmp(type, T_DOUBLE);  __ br(Assembler::equal, false, Assembler::pn, is_double);
      __ delayed()->cmp(type, T_LONG);    __ br(Assembler::equal, false, Assembler::pn, is_long);
      __ delayed()->nop();

      // store int result
      __ st(O0, addr, G0);

      __ BIND(exit);
      __ ret();
      __ delayed()->restore();

      __ BIND(is_object);
      __ ba(false, exit);
      __ delayed()->st_ptr(O0, addr, G0);

      __ BIND(is_float);
      __ ba(false, exit);
      __ delayed()->stf(FloatRegisterImpl::S, F0, addr, G0);

      __ BIND(is_double);
      __ ba(false, exit);
      __ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0);

      __ BIND(is_long);
#ifdef _LP64
      __ ba(false, exit);
      __ delayed()->st_long(O0, addr, G0);      // store entire long
#else
#if defined(COMPILER2)
      // All return values are where we want them, except for Longs.  C2 returns
      // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
      // Since the interpreter will return longs in G1 and O0/O1 in the 32-bit
      // build we simply always use G1.
      // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
      // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
      // first which would move g1 -> O0/O1 and destroy the exception we were throwing.

      __ ba(false, exit);
      __ delayed()->stx(G1, addr, G0);  // store entire long
#else
      __ st(O1, addr, BytesPerInt);
      __ ba(false, exit);
      __ delayed()->st(O0, addr, G0);
#endif /* COMPILER2 */
#endif /* _LP64 */
    }
    return start;
  }
  //----------------------------------------------------------------------------------------------------
  // Return point for a Java call if there's an exception thrown in Java code.
  // The exception is caught and transformed into a pending exception stored in
  // JavaThread that can be tested from within the VM.
  //
  // Oexception: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");

    address start = __ pc();
    // verify that thread corresponds
    __ verify_thread();

    const Register& temp_reg = Gtemp;
    Address pending_exception_addr    (G2_thread, Thread::pending_exception_offset());
    Address exception_file_offset_addr(G2_thread, Thread::exception_file_offset   ());
    Address exception_line_offset_addr(G2_thread, Thread::exception_line_offset   ());

    // set pending exception
    __ verify_oop(Oexception);
    __ st_ptr(Oexception, pending_exception_addr);
    __ set((intptr_t)__FILE__, temp_reg);
    __ st_ptr(temp_reg, exception_file_offset_addr);
    __ set((intptr_t)__LINE__, temp_reg);
    __ st(temp_reg, exception_line_offset_addr);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");

    AddressLiteral stub_ret(StubRoutines::_call_stub_return_address);
    __ jump_to(stub_ret, temp_reg);
    __ delayed()->nop();

    return start;
  }
  //----------------------------------------------------------------------------------------------------
  // Continuation point for runtime calls returning with a pending exception
  // The pending exception check happened in the runtime or native call stub
  // The pending exception in Thread is converted into a Java-level exception
  //
  // Contract with Java-level exception handler: O0 = exception
  //                                             O1 = throwing pc

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward_exception");
    address start = __ pc();

    // Upon entry, O7 has the return address returning into Java
    // (interpreted or compiled) code; i.e. the return address
    // becomes the throwing pc.

    const Register& handler_reg = Gtemp;

    Address exception_addr(G2_thread, Thread::pending_exception_offset());

#ifdef ASSERT
    // make sure that this code is only executed if there is a pending exception
    { Label L;
      __ ld_ptr(exception_addr, Gtemp);
      __ br_notnull(Gtemp, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into handler_reg
    __ get_thread();
    __ ld_ptr(exception_addr, Oexception);
    __ verify_oop(Oexception);
    __ save_frame(0);             // compensates for compiler weakness
    __ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), G2_thread, Lscratch);
    __ mov(O0, handler_reg);
    __ restore();                 // compensates for compiler weakness

    __ ld_ptr(exception_addr, Oexception);
    __ add(O7, frame::pc_return_offset, Oissuing_pc); // save the issuing PC

#ifdef ASSERT
    // make sure exception is set
    { Label L;
      __ br_notnull(Oexception, false, Assembler::pt, L);
      __ delayed()->nop();
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif
    // jump to exception handler
    __ jmp(handler_reg, 0);
    // clear pending exception
    __ delayed()->st_ptr(G0, exception_addr);

    return start;
  }
  //------------------------------------------------------------------------------------------------------------------------
  // Continuation point for throwing of implicit exceptions that are not handled in
  // the current activation. Fabricates an exception oop and initiates normal
  // exception dispatching in this frame. Only callee-saved registers are preserved
  // (through the normal register window / RegisterMap handling).
  // If the compiler needs all registers to be preserved between the fault
  // point and the exception handler then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other implicit
  // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
  // either at call sites or otherwise assume that stack unwinding will be initiated,
  // so caller saved registers were assumed volatile in the compiler.

  // Note that we generate only this stub into a RuntimeStub, because it needs to be
  // properly traversed and ignored during GC, so we change the meaning of the "__"
  // macro within this method.
#undef __
#define __ masm->
  address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc) {
#ifdef ASSERT
    int insts_size = VerifyThread ? 1 * K : 600;
#else
    int insts_size = VerifyThread ? 1 * K : 256;
#endif /* ASSERT */
    int locs_size  = 32;

    CodeBuffer      code(name, insts_size, locs_size);
    MacroAssembler* masm = new MacroAssembler(&code);

    __ verify_thread();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of thread-local storage
    __ assert_not_delayed();

    // Note that we always push a frame because on the SPARC
    // architecture, for all of our implicit exception kinds at call
    // sites, the implicit exception is taken before the callee frame
    // is pushed.
    __ save_frame(0);

    int frame_complete = __ offset();

    if (restore_saved_exception_pc) {
      __ ld_ptr(G2_thread, JavaThread::saved_exception_pc_offset(), I7);
      __ sub(I7, frame::pc_return_offset, I7);
    }

    // Note that we always have a runtime stub frame on the top of stack by this point
    Register last_java_sp = SP;
    // 64-bit last_java_sp is biased!
    __ set_last_Java_frame(last_java_sp, G0);
    if (VerifyThread)  __ mov(G2_thread, O0); // about to be smashed; pass early
    __ save_thread(noreg);
    // do the call
    BLOCK_COMMENT("call runtime_entry");
    __ call(runtime_entry, relocInfo::runtime_call_type);
    if (!VerifyThread)
      __ delayed()->mov(G2_thread, O0);  // pass thread as first argument
    else
      __ delayed()->nop();               // (thread already passed)
    __ restore_thread(noreg);
    __ reset_last_Java_frame();

    // check for pending exceptions. use Gtemp as scratch register.
#ifdef ASSERT
    Label L;

    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    Register scratch_reg = Gtemp;
    __ ld_ptr(exception_addr, scratch_reg);
    __ br_notnull(scratch_reg, false, Assembler::pt, L);
    __ delayed()->nop();
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    BLOCK_COMMENT("call forward_exception_entry");
    __ call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
    // we use O7 linkage so that forward_exception_entry has the issuing PC
    __ delayed()->restore();

    RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, masm->total_frame_size_in_bytes(0), NULL, false);
    return stub->entry_point();
  }

#undef __
#define __ _masm->
  // Generate a routine that sets all the registers so we
  // can tell if the stop routine prints them correctly.
  address generate_test_stop() {
    StubCodeMark mark(this, "StubRoutines", "test_stop");
    address start = __ pc();

    int i;

    __ save_frame(0);

    static jfloat zero = 0.0, one = 1.0;

    // put addr in L0, then load through L0 to F0
    __ set((intptr_t)&zero, L0);  __ ldf( FloatRegisterImpl::S, L0, 0, F0);
    __ set((intptr_t)&one,  L0);  __ ldf( FloatRegisterImpl::S, L0, 0, F1); // 1.0 to F1

    // use add to put 2..18 in F2..F18
    for ( i = 2;  i <= 18;  ++i ) {
      __ fadd( FloatRegisterImpl::S, F1, as_FloatRegister(i-1),  as_FloatRegister(i));
    }

    // Now put double 2 in F16, double 18 in F18
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F2,  F16 );
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F18, F18 );

    // use add to put 20..32 in F20..F32
    for (i = 20; i < 32; i += 2) {
      __ fadd( FloatRegisterImpl::D, F16, as_FloatRegister(i-2),  as_FloatRegister(i));
    }

    // put 0..7 in i's, 8..15 in l's, 16..23 in o's, 24..31 in g's
    for ( i = 0; i < 8; ++i ) {
      if (i < 6) {
        __ set(     i, as_iRegister(i));
        __ set(16 + i, as_oRegister(i));
        __ set(24 + i, as_gRegister(i));
      }
      __ set( 8 + i, as_lRegister(i));
    }

    __ stop("testing stop");

    __ ret();
    __ delayed()->restore();

    return start;
  }
  address generate_stop_subroutine() {
    StubCodeMark mark(this, "StubRoutines", "stop_subroutine");
    address start = __ pc();

    __ stop_subroutine();

    return start;
  }

  address generate_flush_callers_register_windows() {
    StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
    address start = __ pc();

    __ flush_windows();
    __ retl(false);
    __ delayed()->add( FP, STACK_BIAS, O0 );
    // The returned value must be a stack pointer whose register save area
    // is flushed, and will stay flushed while the caller executes.

    return start;
  }
  // Helper functions for v8 atomic operations.
  //
  void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) {
    if (mark_oop_reg == noreg) {
      address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr();
      __ set((intptr_t)lock_ptr, lock_ptr_reg);
    } else {
      assert(scratch_reg != noreg, "just checking");
      address lock_ptr = (address)StubRoutines::Sparc::_v8_oop_lock_cache;
      __ set((intptr_t)lock_ptr, lock_ptr_reg);
      __ and3(mark_oop_reg, StubRoutines::Sparc::v8_oop_lock_mask_in_place, scratch_reg);
      __ add(lock_ptr_reg, scratch_reg, lock_ptr_reg);
    }
  }

  void generate_v8_lock_prologue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {

    get_v8_oop_lock_ptr(lock_ptr_reg, mark_oop_reg, scratch_reg);
    __ set(StubRoutines::Sparc::locked, lock_reg);
    // Initialize yield counter
    __ mov(G0, yield_reg);

    __ BIND(retry);
    __ cmp(yield_reg, V8AtomicOperationUnderLockSpinCount);
    __ br(Assembler::less, false, Assembler::pt, dontyield);
    __ delayed()->nop();

    // This code can only be called from inside the VM, this
    // stub is only invoked from Atomic::add().  We do not
    // want to use call_VM, because _last_java_sp and such
    // must already be set.
    //
    // Save the regs and make space for a C call
    __ save(SP, -96, SP);
    __ save_all_globals_into_locals();
    BLOCK_COMMENT("call os::naked_sleep");
    __ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
    __ delayed()->nop();
    __ restore_globals_from_locals();
    __ restore();
    // reset the counter
    __ mov(G0, yield_reg);

    __ BIND(dontyield);

    // try to get lock
    __ swap(lock_ptr_reg, 0, lock_reg);

    // did we get the lock?
    __ cmp(lock_reg, StubRoutines::Sparc::unlocked);
    __ br(Assembler::notEqual, true, Assembler::pn, retry);
    __ delayed()->add(yield_reg, 1, yield_reg);

    // yes, got lock. do the operation here.
  }

  void generate_v8_lock_epilogue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
    __ st(lock_reg, lock_ptr_reg, 0);  // unlock
  }
  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
  //
  // Arguments:
  //
  //      exchange_value: O0
  //      dest:           O1
  //
  // Results:
  //
  //      O0: the value previously stored in dest
  //
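  // For intuition, a rough C-level sketch of the CAS-based path below
  // (illustrative only; the register choices and the v8 lock fallback are
  // exactly as coded in the stub):
  //
  //   jint xchg(jint x, volatile jint* dest) {
  //     jint old;
  //     do {
  //       old = *dest;                        // observe the previous value
  //     } while (cas(dest, old, x) != old);   // retry if it changed under us
  //     return old;
  //   }
  //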
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    if (UseCASForSwap) {
      // Use CAS instead of swap, just in case the MP hardware
      // prefers to work with just one kind of synch. instruction.
      Label retry;
      __ BIND(retry);
      __ mov(O0, O3);       // scratch copy of exchange value
      __ ld(O1, 0, O2);     // observe the previous value
      // try to replace O2 with O3
      __ cas_under_lock(O1, O2, O3,
          (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(), false);
      __ cmp(O2, O3);
      __ br(Assembler::notEqual, false, Assembler::pn, retry);
      __ delayed()->nop();

      __ retl(false);
      __ delayed()->mov(O2, O0);  // report previous value to caller

    } else {
      if (VM_Version::v9_instructions_work()) {
        __ retl(false);
        __ delayed()->swap(O1, 0, O0);
      } else {
        const Register& lock_reg     = O2;
        const Register& lock_ptr_reg = O3;
        const Register& yield_reg    = O4;

        Label retry;
        Label dontyield;

        generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
        // got the lock, do the swap
        __ swap(O1, 0, O0);

        generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
        __ retl(false);
        __ delayed()->nop();
      }
    }

    return start;
  }
  // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
  //
  // Arguments:
  //
  //      exchange_value: O0
  //      dest:           O1
  //      compare_value:  O2
  //
  // Results:
  //
  //      O0: the value previously stored in dest
  //
  // Overwrites (v8): O3,O4,O5
  //
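  // Semantics, as a C-level sketch (illustrative only; the compare and the
  // conditional store happen as one atomic step):
  //
  //   jint cmpxchg(jint x, volatile jint* dest, jint compare) {
  //     jint old = *dest;
  //     if (old == compare) *dest = x;
  //     return old;                    // caller checks old == compare
  //   }
  //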
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    // cmpxchg(dest, compare_value, exchange_value)
    __ cas_under_lock(O1, O2, O0,
        (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(), false);
    __ retl(false);
    __ delayed()->nop();

    return start;
  }
  // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
  //
  // Arguments:
  //
  //      exchange_value: O1:O0
  //      dest:           O2
  //      compare_value:  O4:O3
  //
  // Results:
  //
  //      O1:O0: the value previously stored in dest
  //
  // This only works on V9, on V8 we don't generate any
  // code and just return NULL.
  //
  // Overwrites: G1,G2,G3
  //
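  // The two 32-bit register halves are packed into single 64-bit registers
  // before the casx, roughly (illustrative only; this mirrors the sllx/srl/
  // or3 sequences below):
  //
  //   exchange64 = ((uint64_t)O0 << 32) | (uint32_t)O1;   // -> O0
  //   compare64  = ((uint64_t)O3 << 32) | (uint32_t)O4;   // -> O3
  //
  // and the 64-bit result is split back into O1 (low word) and O0 (high word).
  //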
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    if (!VM_Version::supports_cx8())
        return NULL;
    __ sllx(O0, 32, O0);
    __ srl(O1, 0, O1);
    __ or3(O0,O1,O0);      // O0 holds 64-bit value of exchange_value
    __ sllx(O3, 32, O3);
    __ srl(O4, 0, O4);
    __ or3(O3,O4,O3);      // O3 holds 64-bit value of compare_value
    __ casx(O2, O3, O0);
    __ srl(O0, 0, O1);     // unpacked return value in O1:O0
    __ retl(false);
    __ delayed()->srlx(O0, 32, O0);

    return start;
  }
  // Support for jint Atomic::add(jint add_value, volatile jint* dest).
  //
  // Arguments:
  //
  //      add_value: O0   (e.g., +1 or -1)
  //      dest:      O1
  //
  // Results:
  //
  //      O0: the new value stored in dest
  //
  // Overwrites (v9): O3
  // Overwrites (v8): O3,O4,O5
  //
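  // The v9 path below is a classic CAS retry loop; as a C-level sketch
  // (illustrative only):
  //
  //   jint add(jint add_value, volatile jint* dest) {
  //     jint old, updated;
  //     do {
  //       old     = *dest;
  //       updated = old + add_value;
  //     } while (cas(dest, old, updated) != old);  // lost a race: retry
  //     return updated;                            // return the new value
  //   }
  //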
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();
    __ BIND(_atomic_add_stub);

    if (VM_Version::v9_instructions_work()) {
      Label retry;
      __ BIND(retry);

      __ lduw(O1, 0, O2);
      __ add(O0, O2, O3);
      __ cas(O1, O2, O3);
      __ cmp(O2, O3);
      __ br(Assembler::notEqual, false, Assembler::pn, retry);
      __ delayed()->nop();
      __ retl(false);
      __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
    } else {
      const Register& lock_reg     = O2;
      const Register& lock_ptr_reg = O3;
      const Register& value_reg    = O4;
      const Register& yield_reg    = O5;

      Label retry;
      Label dontyield;

      generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
      // got lock, do the increment
      __ ld(O1, 0, value_reg);
      __ add(O0, value_reg, value_reg);
      __ st(value_reg, O1, 0);

      // %%% only for RMO and PSO
      __ membar(Assembler::StoreStore);

      generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);

      __ retl(false);
      __ delayed()->mov(value_reg, O0);
    }

    return start;
  }
  Label _atomic_add_stub;  // called from other stubs
  //------------------------------------------------------------------------------------------------------------------------
  // The following routine generates a subroutine to throw an asynchronous
  // UnknownError when an unsafe access gets a fault that could not be
  // reasonably prevented by the programmer.  (Example: SIGBUS/OBJERR.)
  //
  // Arguments:
  //
  //      trapping PC: O7
  //
  // Results:
  //      posts an asynchronous exception, skips the trapping instruction
  //
  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    address start = __ pc();

    const int preserve_register_words = (64 * 2);
    Address preserve_addr(FP, (-preserve_register_words * wordSize) + STACK_BIAS);

    Register Lthread = L7_thread_cache;
    int i;

    __ save_frame(0);
    __ mov(G1, L1);
    __ mov(G2, L2);
    __ mov(G3, L3);
    __ mov(G4, L4);
    __ mov(G5, L5);
    for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
      __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
    }

    address entry_point = CAST_FROM_FN_PTR(address, handle_unsafe_access);
    BLOCK_COMMENT("call handle_unsafe_access");
    __ call(entry_point, relocInfo::runtime_call_type);
    __ delayed()->nop();

    __ mov(L1, G1);
    __ mov(L2, G2);
    __ mov(L3, G3);
    __ mov(L4, G4);
    __ mov(L5, G5);
    for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
      __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
    }

    __ verify_thread();

    __ jmp(O0, 0);
    __ delayed()->restore();

    return start;
  }
  // Support for uint StubRoutines::Sparc::partial_subtype_check( Klass sub, Klass super );
  // Arguments:
  //
  //      ret  : O0, returned
  //      icc/xcc: set as O0 (depending on wordSize)
  //      sub  : O1, argument, not changed
  //      super: O2, argument, not changed
  //      raddr: O7, blown by call
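  //
  // The slow path emitted by check_klass_subtype_slow_path is, in essence,
  // a linear scan of the secondary supers (a hedged sketch only; the real
  // code also caches a hit in the secondary-super cache):
  //
  //   for (int i = 0; i < length_of(sub->secondary_supers()); i++) {
  //     if (element_at(sub->secondary_supers(), i) == super)
  //       return 0;   // hit: sub is a subtype of super
  //   }
  //   return 1;       // miss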
  address generate_partial_subtype_check() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
    address start = __ pc();
    Label miss;

#if defined(COMPILER2) && !defined(_LP64)
    // Do not use a 'save' because it blows the 64-bit O registers.
    __ add(SP,-4*wordSize,SP);  // Make space for 4 temps (stack must be 2 words aligned)
    __ st_ptr(L0,SP,(frame::register_save_words+0)*wordSize);
    __ st_ptr(L1,SP,(frame::register_save_words+1)*wordSize);
    __ st_ptr(L2,SP,(frame::register_save_words+2)*wordSize);
    __ st_ptr(L3,SP,(frame::register_save_words+3)*wordSize);
    Register Rret   = O0;
    Register Rsub   = O1;
    Register Rsuper = O2;
#else
    __ save_frame(0);
    Register Rret   = I0;
    Register Rsub   = I1;
    Register Rsuper = I2;
#endif

    Register L0_ary_len = L0;
    Register L1_ary_ptr = L1;
    Register L2_super   = L2;
    Register L3_index   = L3;

    __ check_klass_subtype_slow_path(Rsub, Rsuper,
                                     L0, L1, L2, L3,
                                     NULL, &miss);

    // Match falls through here.
    __ addcc(G0,0,Rret);        // set Z flags, Z result

#if defined(COMPILER2) && !defined(_LP64)
    __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
    __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
    __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
    __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
    __ retl();                  // Result in Rret is zero; flags set to Z
    __ delayed()->add(SP,4*wordSize,SP);
#else
    __ ret();                   // Result in Rret is zero; flags set to Z
    __ delayed()->restore();
#endif

    __ BIND(miss);
    __ addcc(G0,1,Rret);        // set NZ flags, NZ result

#if defined(COMPILER2) && !defined(_LP64)
    __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
    __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
    __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
    __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
    __ retl();                  // Result in Rret is != 0; flags set to NZ
    __ delayed()->add(SP,4*wordSize,SP);
#else
    __ ret();                   // Result in Rret is != 0; flags set to NZ
    __ delayed()->restore();
#endif

    return start;
  }
  // Called from MacroAssembler::verify_oop
  //
  address generate_verify_oop_subroutine() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop_stub");

    address start = __ pc();

    __ verify_oop_subroutine();

    return start;
  }
  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts (sllx, srax).
  //
  //  Input:
  //    Rint  -  32-bit value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#if defined(ASSERT) && defined(_LP64)
    __ signx(Rint, Rtmp);
    __ cmp(Rint, Rtmp);
    __ breakpoint_trap(Assembler::notEqual, Assembler::xcc);
#endif
  }
  //
  //  Generate overlap test for array copy stubs
  //
  //  Input:
  //    O0    -  array1
  //    O1    -  array2
  //    O2    -  element count
  //
  //  Kills temps:  O3, O4
  //
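  //  In C terms, the generated test is (a sketch of the two branches below):
  //
  //    byte_count = count << log2_elem_size;
  //    if (to <= from || (uintptr_t)(to - from) >= byte_count)
  //      goto no_overlap_target;   // disjoint: a plain forward copy is safe
  //    // else the regions overlap, with 'to' inside [from, from+byte_count)
  //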
  void array_overlap_test(address no_overlap_target, int log2_elem_size) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, log2_elem_size);
  }
  void array_overlap_test(Label& L_no_overlap, int log2_elem_size) {
    array_overlap_test(NULL, &L_no_overlap, log2_elem_size);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, int log2_elem_size) {
    const Register from       = O0;  // source array address
    const Register to         = O1;  // destination array address
    const Register count      = O2;  // element count
    const Register to_from    = O3;  // to - from
    const Register byte_count = O4;  // count << log2_elem_size

    __ subcc(to, from, to_from);
    __ sll_ptr(count, log2_elem_size, byte_count);
    if (NOLp == NULL)
      __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, no_overlap_target);
    else
      __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp));
    __ delayed()->cmp(to_from, byte_count);
    if (NOLp == NULL)
      __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, no_overlap_target);
    else
      __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, (*NOLp));
    __ delayed()->nop();
  }
  //
  //  Generate pre-write barrier for array.
  //
  //  Input:
  //     addr     - register containing starting address
  //     count    - register containing element count
  //
  //  The input registers are overwritten.
  //
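  //  For G1 (SATB) the pre-barrier must record the oops that are about to be
  //  overwritten, i.e. it must run over the destination range *before* any
  //  copying happens (this is the placement bug fixed by this change); in
  //  C-like terms, a sketch of what the runtime call performs:
  //
  //    for (int i = 0; i < count; i++)
  //      satb_enqueue(dest[i]);   // snapshot-at-the-beginning old values
  //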
  void gen_write_ref_array_pre_barrier(Register addr, Register count) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    if (bs->has_write_ref_pre_barrier()) {
      assert(bs->has_write_ref_array_pre_opt(),
             "Else unsupported barrier set.");

      __ save_frame(0);
      // Save the necessary global regs... will be used after.
      if (addr->is_global()) {
        __ mov(addr, L0);
      }
      if (count->is_global()) {
        __ mov(count, L1);
      }
      __ mov(addr->after_save(), O0);
      // Get the count into O1
      __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
      __ delayed()->mov(count->after_save(), O1);
      if (addr->is_global()) {
        __ mov(L0, addr);
      }
      if (count->is_global()) {
        __ mov(L1, count);
      }
      __ restore();
    }
  }
  //
  //  Generate post-write barrier for array.
  //
  //  Input:
  //     addr     - register containing starting address
  //     count    - register containing element count
  //     tmp      - scratch register
  //
  //  The input registers are overwritten.
  //
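  //  For the card-table barrier sets, the post barrier just dirties every
  //  card spanned by [addr, addr + count*heapOopSize); as a sketch of what
  //  the loop below computes:
  //
  //    for (uintptr_t c = (uintptr_t)addr >> card_shift;
  //         c <= ((uintptr_t)addr + count*heapOopSize - 1) >> card_shift;
  //         c++)
  //      byte_map_base[c] = 0;   // 0 == dirty card
  //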
  void gen_write_ref_array_post_barrier(Register addr, Register count,
                                        Register tmp) {
    BarrierSet* bs = Universe::heap()->barrier_set();

    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        {
          // Get some new fresh output registers.
          __ save_frame(0);
          __ mov(addr->after_save(), O0);
          __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post));
          __ delayed()->mov(count->after_save(), O1);
          __ restore();
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
        {
          CardTableModRefBS* ct = (CardTableModRefBS*)bs;
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
          assert_different_registers(addr, count, tmp);

          Label L_loop;

          __ sll_ptr(count, LogBytesPerHeapOop, count);
          __ sub(count, BytesPerHeapOop, count);
          __ add(count, addr, count);
          // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
          __ srl_ptr(addr, CardTableModRefBS::card_shift, addr);
          __ srl_ptr(count, CardTableModRefBS::card_shift, count);
          __ sub(count, addr, count);
          AddressLiteral rs(ct->byte_map_base);
          __ set(rs, tmp);
          __ BIND(L_loop);
          __ stb(G0, tmp, addr);
          __ subcc(count, 1, count);
          __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
          __ delayed()->add(addr, 1, addr);
        }
        break;
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();
    }
  }
  // Copy big chunks forward with shift
  //
  // Inputs:
  //   from      - source arrays
  //   to        - destination array aligned to 8-bytes
  //   count     - element count to copy; must be >= the equivalent of 16 bytes
  //   count_dec - count decrement equivalent to 16 bytes
  //   L_copy_bytes - copy exit label
  //
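  // Each iteration merges two aligned 8-byte loads into one aligned 8-byte
  // store when 'from' is misaligned relative to 'to'; schematically (a
  // sketch, with ls/rs the shift counts computed below):
  //
  //   dst[i] = (src_chunk[i] << ls) | (src_chunk[i+1] >> rs);
  //
  // so all loads and stores stay 8-byte aligned even though the copy itself
  // is at a byte offset.
  //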
  void copy_16_bytes_forward_with_shift(Register from, Register to,
                     Register count, int count_dec, Label& L_copy_bytes) {
    Label L_loop, L_aligned_copy, L_copy_last_bytes;

    // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
    __ andcc(from, 7, G1); // misaligned bytes
    __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
    __ delayed()->nop();

    const Register left_shift  = G1; // left  shift bit counter
    const Register right_shift = G5; // right shift bit counter

    __ sll(G1, LogBitsPerByte, left_shift);
    __ mov(64, right_shift);
    __ sub(right_shift, left_shift, right_shift);

    //
    // Load 2 aligned 8-bytes chunks and use one from previous iteration
    // to form 2 aligned 8-bytes chunks to store.
    //
    __ deccc(count, count_dec); // Pre-decrement 'count'
    __ andn(from, 7, from);     // Align address
    __ ldx(from, 0, O3);
    __ inc(from, 8);
    __ align(OptoLoopAlignment);
    __ BIND(L_loop);
    __ ldx(from, 0, O4);
    __ deccc(count, count_dec); // Can we do next iteration after this one?
    __ ldx(from, 8, G4);
    __ inc(to, 16);
    __ inc(from, 16);
    __ sllx(O3, left_shift,  O3);
    __ srlx(O4, right_shift, G3);
    __ bset(G3, O3);
    __ stx(O3, to, -16);
    __ sllx(O4, left_shift,  O4);
    __ srlx(G4, right_shift, G3);
    __ bset(G3, O4);
    __ stx(O4, to, -8);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
    __ delayed()->mov(G4, O3);

    __ inccc(count, count_dec>>1 ); // + 8 bytes
    __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
    __ delayed()->inc(count, count_dec>>1); // restore 'count'

    // copy 8 bytes, part of them already loaded in O3
    __ ldx(from, 0, O4);
    __ inc(to, 8);
    __ inc(from, 8);
    __ sllx(O3, left_shift,  O3);
    __ srlx(O4, right_shift, G3);
    __ bset(O3, G3);
    __ stx(G3, to, -8);

    __ BIND(L_copy_last_bytes);
    __ srl(right_shift, LogBitsPerByte, right_shift); // misaligned bytes
    __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
    __ delayed()->sub(from, right_shift, from);       // restore address

    __ BIND(L_aligned_copy);
  }
  // Copy big chunks backward with shift
  //
  // Inputs:
  //   end_from  - source arrays end address
  //   end_to    - destination array end address aligned to 8-bytes
  //   count     - element count to copy; must be >= the equivalent of 16 bytes
  //   count_dec - count decrement equivalent to 16 bytes
  //   L_aligned_copy - aligned copy exit label
  //   L_copy_bytes   - copy exit label
  //
  void copy_16_bytes_backward_with_shift(Register end_from, Register end_to,
                     Register count, int count_dec,
                     Label& L_aligned_copy, Label& L_copy_bytes) {
    Label L_loop, L_copy_last_bytes;

    // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
    __ andcc(end_from, 7, G1); // misaligned bytes
    __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
    __ delayed()->deccc(count, count_dec); // Pre-decrement 'count'

    const Register left_shift  = G1; // left  shift bit counter
    const Register right_shift = G5; // right shift bit counter

    __ sll(G1, LogBitsPerByte, left_shift);
    __ mov(64, right_shift);
    __ sub(right_shift, left_shift, right_shift);

    //
    // Load 2 aligned 8-bytes chunks and use one from previous iteration
    // to form 2 aligned 8-bytes chunks to store.
    //
    __ andn(end_from, 7, end_from); // Align address
    __ ldx(end_from, 0, O3);
    __ align(OptoLoopAlignment);
    __ BIND(L_loop);
    __ ldx(end_from, -8, O4);
    __ deccc(count, count_dec); // Can we do next iteration after this one?
    __ ldx(end_from, -16, G4);
    __ dec(end_to, 16);
    __ dec(end_from, 16);
    __ srlx(O3, right_shift, O3);
    __ sllx(O4, left_shift,  G3);
    __ bset(G3, O3);
    __ stx(O3, end_to, 8);
    __ srlx(O4, right_shift, O4);
    __ sllx(G4, left_shift,  G3);
    __ bset(G3, O4);
    __ stx(O4, end_to, 0);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
    __ delayed()->mov(G4, O3);

    __ inccc(count, count_dec>>1 ); // + 8 bytes
    __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
    __ delayed()->inc(count, count_dec>>1); // restore 'count'

    // copy 8 bytes, part of them already loaded in O3
    __ ldx(end_from, -8, O4);
    __ dec(end_to, 8);
    __ dec(end_from, 8);
    __ srlx(O3, right_shift, O3);
    __ sllx(O4, left_shift,  G3);
    __ bset(O3, G3);
    __ stx(G3, end_to, 0);

    __ BIND(L_copy_last_bytes);
    __ srl(left_shift, LogBitsPerByte, left_shift); // misaligned bytes
    __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
    __ delayed()->add(end_from, left_shift, end_from); // restore address
  }
  //
  //  Generate stub for disjoint byte copy.  If "aligned" is true, the
  //  "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  O0
  //      to:    O1
  //      count: O2 treated as signed
  //
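  //  Overall strategy of the stub, in C-like terms (a sketch; the element
  //  type is byte here, and the short/int/long variants follow the same
  //  three-phase shape):
  //
  //    while (count && !is_aligned8(to)) { *to++ = *from++; count--; }  // align 'to'
  //    /* bulk: 16 bytes per iteration, shifting if 'from' stays misaligned */
  //    while (count--) *to++ = *from++;                                 // tail
  //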
  address generate_disjoint_byte_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_skip_alignment, L_align;
    Label L_copy_byte, L_copy_byte_loop, L_exit;

    const Register from      = O0;   // source array address
    const Register to        = O1;   // destination array address
    const Register count     = O2;   // elements count
    const Register offset    = O5;   // offset from start of arrays
    // O3, O4, G3, G4 are used as temp registers

    assert_clean_int(count, O3);     // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    // for short arrays, just do single element copy
    __ cmp(count, 23); // 16 + 7
    __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
    __ delayed()->mov(G0, offset);

    if (aligned) {
      // 'aligned' == true when it is known statically during compilation
      // of this arraycopy call site that both 'from' and 'to' addresses
      // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
      //
      // Aligned arrays have 4 bytes alignment in 32-bits VM
      // and 8 bytes - in 64-bits VM. So we do it only for 32-bits VM
      //
#ifndef _LP64
      // copy a 4-bytes word if necessary to align 'to' to 8 bytes
      __ andcc(to, 7, G0);
      __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment);
      __ delayed()->ld(from, 0, O3);
      __ inc(from, 4);
      __ inc(to, 4);
      __ dec(count, 4);
      __ st(O3, to, -4);
      __ BIND(L_skip_alignment);
#endif
    } else {
      // copy bytes to align 'to' on 8 byte boundary
      __ andcc(to, 7, G1); // misaligned bytes
      __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
      __ delayed()->neg(G1);
      __ inc(G1, 8);       // bytes needed to reach 8-byte alignment
      __ sub(count, G1, count);
      __ BIND(L_align);
      __ ldub(from, 0, O3);
      __ deccc(G1);
      __ inc(from);
      __ stb(O3, to, 0);
      __ br(Assembler::notZero, false, Assembler::pt, L_align);
      __ delayed()->inc(to);
      __ BIND(L_skip_alignment);
    }
#ifdef _LP64
    if (!aligned)
#endif
    {
      // Copy with shift 16 bytes per iteration if arrays do not have
      // the same alignment mod 8, otherwise fall through to the next
      // code for aligned copy.
      // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
      // Also jump over aligned copy after the copy with shift completed.

      copy_16_bytes_forward_with_shift(from, to, count, 16, L_copy_byte);
    }

    // Both array are 8 bytes aligned, copy 16 bytes at a time
    __ and3(count, 7, G4); // Save count
    __ srl(count, 3, count);
    generate_disjoint_long_copy_core(aligned);
    __ mov(G4, count);     // Restore count

    // copy tailing bytes
    __ BIND(L_copy_byte);
    __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
    __ delayed()->nop();
    __ align(OptoLoopAlignment);
    __ BIND(L_copy_byte_loop);
    __ ldub(from, offset, O3);
    __ deccc(count);
    __ stb(O3, to, offset);
    __ brx(Assembler::notZero, false, Assembler::pt, L_copy_byte_loop);
    __ delayed()->inc(offset);

    __ BIND(L_exit);
    // O3, O4 are used as temp registers
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
    __ retl();
    __ delayed()->mov(G0, O0); // return 0
    return start;
  }
  //
  //  Generate stub for conjoint byte copy.  If "aligned" is true, the
  //  "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  O0
  //      to:    O1
  //      count: O2 treated as signed
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address *entry, const char *name) {
    // Do reverse copy.

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_skip_alignment, L_align, L_aligned_copy;
    Label L_copy_byte, L_copy_byte_loop, L_exit;

    const Register from      = O0;   // source array address
    const Register to        = O1;   // destination array address
    const Register count     = O2;   // elements count
    const Register end_from  = from; // source array end address
    const Register end_to    = to;   // destination array end address

    assert_clean_int(count, O3);     // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, 0);

    __ add(to, count, end_to);       // offset after last copied element

    // for short arrays, just do single element copy
    __ cmp(count, 23); // 16 + 7
    __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
    __ delayed()->add(from, count, end_from);

    {
      // Align the ends of the arrays since they may not be aligned even
      // when the arrays themselves are.

      // copy bytes to align 'end_to' on 8 byte boundary
      __ andcc(end_to, 7, G1); // misaligned bytes
      __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
      __ delayed()->nop();
      __ sub(count, G1, count);
      __ BIND(L_align);
      __ dec(end_from);
      __ dec(end_to);
      __ ldub(end_from, 0, O3);
      __ deccc(G1);
      __ brx(Assembler::notZero, false, Assembler::pt, L_align);
      __ delayed()->stb(O3, end_to, 0);
      __ BIND(L_skip_alignment);
    }
#ifdef _LP64
    if (aligned) {
      // Both arrays are aligned to 8-bytes in 64-bits VM.
      // The 'count' is decremented in copy_16_bytes_backward_with_shift()
      // in unaligned case.
      __ dec(count, 16);
    } else
#endif
    {
      // Copy with shift 16 bytes per iteration if arrays do not have
      // the same alignment mod 8, otherwise jump to the next
      // code for aligned copy (and subtracting 16 from 'count' before jump).
      // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
      // Also jump over aligned copy after the copy with shift completed.

      copy_16_bytes_backward_with_shift(end_from, end_to, count, 16,
                                        L_aligned_copy, L_copy_byte);
    }
    // copy 4 elements (16 bytes) at a time
    __ align(OptoLoopAlignment);
    __ BIND(L_aligned_copy);
    __ dec(end_from, 16);
    __ ldx(end_from, 8, O3);
    __ ldx(end_from, 0, O4);
    __ dec(end_to, 16);
    __ deccc(count, 16);
    __ stx(O3, end_to, 8);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
    __ delayed()->stx(O4, end_to, 0);
    __ inc(count, 16);

    // copy 1 element (1 byte) at a time
    __ BIND(L_copy_byte);
    __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
    __ delayed()->nop();
    __ align(OptoLoopAlignment);
    __ BIND(L_copy_byte_loop);
    __ dec(end_from);
    __ dec(end_to);
    __ ldub(end_from, 0, O4);
    __ deccc(count);
    __ brx(Assembler::greater, false, Assembler::pt, L_copy_byte_loop);
    __ delayed()->stb(O4, end_to, 0);

    __ BIND(L_exit);
    // O3, O4 are used as temp registers
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
    __ retl();
    __ delayed()->mov(G0, O0); // return 0
    return start;
  }
  //
  //  Generate stub for disjoint short copy.  If "aligned" is true, the
  //  "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  O0
  //      to:    O1
  //      count: O2 treated as signed
  //
  address generate_disjoint_short_copy(bool aligned, address *entry, const char * name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_skip_alignment, L_skip_alignment2;
    Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;

    const Register from      = O0;   // source array address
    const Register to        = O1;   // destination array address
    const Register count     = O2;   // elements count
    const Register offset    = O5;   // offset from start of arrays
    // O3, O4, G3, G4 are used as temp registers

    assert_clean_int(count, O3);     // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    // for short arrays, just do single element copy
    __ cmp(count, 11); // 8 + 3  (22 bytes)
    __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
    __ delayed()->mov(G0, offset);

    if (aligned) {
      // 'aligned' == true when it is known statically during compilation
      // of this arraycopy call site that both 'from' and 'to' addresses
      // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
      //
      // Aligned arrays have 4 bytes alignment in 32-bits VM
      // and 8 bytes - in 64-bits VM.
      //
#ifndef _LP64
      // copy a 2-element word if necessary to align 'to' to 8 bytes
      __ andcc(to, 7, G0);
      __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
      __ delayed()->ld(from, 0, O3);
      __ inc(from, 4);
      __ inc(to, 4);
      __ dec(count, 2);
      __ st(O3, to, -4);
      __ BIND(L_skip_alignment);
#endif
    } else {
      // copy 1 element if necessary to align 'to' on a 4-byte boundary
      __ andcc(to, 3, G0);
      __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
      __ delayed()->lduh(from, 0, O3);
      __ inc(from, 2);
      __ inc(to, 2);
      __ dec(count);
      __ sth(O3, to, -2);
      __ BIND(L_skip_alignment);

      // copy 2 elements to align 'to' on an 8-byte boundary
      __ andcc(to, 7, G0);
      __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
      __ delayed()->lduh(from, 0, O3);
      __ dec(count, 2);
      __ lduh(from, 2, O4);
      __ inc(from, 4);
      __ inc(to, 4);
      __ sth(O3, to, -4);
      __ sth(O4, to, -2);
      __ BIND(L_skip_alignment2);
    }
#ifdef _LP64
    if (!aligned)
#endif
    {
      // Copy with shift 16 bytes per iteration if arrays do not have
      // the same alignment mod 8, otherwise fall through to the next
      // code for aligned copy.
      // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
      // Also jump over aligned copy after the copy with shift completed.

      copy_16_bytes_forward_with_shift(from, to, count, 8, L_copy_2_bytes);
    }

    // Both array are 8 bytes aligned, copy 16 bytes at a time
    __ and3(count, 3, G4); // Save
    __ srl(count, 2, count);
    generate_disjoint_long_copy_core(aligned);
    __ mov(G4, count);     // restore

    // copy 1 element at a time
    __ BIND(L_copy_2_bytes);
    __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
    __ delayed()->nop();
    __ align(OptoLoopAlignment);
    __ BIND(L_copy_2_bytes_loop);
    __ lduh(from, offset, O3);
    __ deccc(count);
    __ sth(O3, to, offset);
    __ brx(Assembler::notZero, false, Assembler::pt, L_copy_2_bytes_loop);
    __ delayed()->inc(offset, 2);

    __ BIND(L_exit);
    // O3, O4 are used as temp registers
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
    __ retl();
    __ delayed()->mov(G0, O0); // return 0
    return start;
  }
  //
  //  Generate stub for array fill of byte, short, or int elements. If
  //  "aligned" is true, the "to" address is assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      to:    O0
  //      value: O1
  //      count: O2 treated as signed
  //
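  //  Before the bulk loops, the fill value is replicated across a full
  //  64-bit register so that a single stx covers 8 bytes; for bytes this is
  //  (a sketch of what the and3/sllx/or3 sequences below compute):
  //
  //    v &= 0xff;  v |= v << 8;  v |= v << 16;  v |= v << 32;
  //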
1613 address generate_fill(BasicType t, bool aligned, const char* name) {
1614 __ align(CodeEntryAlignment);
1615 StubCodeMark mark(this, "StubRoutines", name);
1616 address start = __ pc();
1618 const Register to = O0; // source array address
1619 const Register value = O1; // fill value
1620 const Register count = O2; // elements count
1621 // O3 is used as a temp register
1623 assert_clean_int(count, O3); // Make sure 'count' is clean int.
1625 Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
1626 Label L_fill_2_bytes, L_fill_elements, L_fill_32_bytes;
1628 int shift = -1;
1629 switch (t) {
1630 case T_BYTE:
1631 shift = 2;
1632 break;
1633 case T_SHORT:
1634 shift = 1;
1635 break;
1636 case T_INT:
1637 shift = 0;
1638 break;
1639 default: ShouldNotReachHere();
1640 }
1642 BLOCK_COMMENT("Entry:");
1644 if (t == T_BYTE) {
1645 // Zero extend value
1646 __ and3(value, 0xff, value);
1647 __ sllx(value, 8, O3);
1648 __ or3(value, O3, value);
1649 }
1650 if (t == T_SHORT) {
1651 // Zero extend value
1652 __ sllx(value, 48, value);
1653 __ srlx(value, 48, value);
1654 }
1655 if (t == T_BYTE || t == T_SHORT) {
1656 __ sllx(value, 16, O3);
1657 __ or3(value, O3, value);
1658 }
1660 __ cmp(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
1661 __ brx(Assembler::lessUnsigned, false, Assembler::pn, L_fill_elements); // use unsigned cmp
1662 __ delayed()->andcc(count, 1, G0);
1664 if (!aligned && (t == T_BYTE || t == T_SHORT)) {
1665 // align source address at 4 bytes address boundary
1666 if (t == T_BYTE) {
1667 // One byte misalignment happens only for byte arrays
1668 __ andcc(to, 1, G0);
1669 __ br(Assembler::zero, false, Assembler::pt, L_skip_align1);
1670 __ delayed()->nop();
1671 __ stb(value, to, 0);
1672 __ inc(to, 1);
1673 __ dec(count, 1);
1674 __ BIND(L_skip_align1);
1675 }
1676 // Two bytes misalignment happens only for byte and short (char) arrays
1677 __ andcc(to, 2, G0);
1678 __ br(Assembler::zero, false, Assembler::pt, L_skip_align2);
1679 __ delayed()->nop();
1680 __ sth(value, to, 0);
1681 __ inc(to, 2);
1682 __ dec(count, 1 << (shift - 1));
1683 __ BIND(L_skip_align2);
1684 }
1685 #ifdef _LP64
1686 if (!aligned) {
1687 #endif
1688 // align to 8 bytes, we know we are 4 byte aligned to start
1689 __ andcc(to, 7, G0);
1690 __ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes);
1691 __ delayed()->nop();
1692 __ stw(value, to, 0);
1693 __ inc(to, 4);
1694 __ dec(count, 1 << shift);
1695 __ BIND(L_fill_32_bytes);
1696 #ifdef _LP64
1697 }
1698 #endif
1700 if (t == T_INT) {
1701 // Zero extend value
1702 __ srl(value, 0, value);
1703 }
1704 if (t == T_BYTE || t == T_SHORT || t == T_INT) {
1705 __ sllx(value, 32, O3);
1706 __ or3(value, O3, value);
1707 }
1709 Label L_check_fill_8_bytes;
1710 // Fill 32-byte chunks
1711 __ subcc(count, 8 << shift, count);
1712 __ brx(Assembler::less, false, Assembler::pt, L_check_fill_8_bytes);
1713 __ delayed()->nop();
1715 Label L_fill_32_bytes_loop, L_fill_4_bytes;
1716 __ align(16);
1717 __ BIND(L_fill_32_bytes_loop);
1719 __ stx(value, to, 0);
1720 __ stx(value, to, 8);
1721 __ stx(value, to, 16);
1722 __ stx(value, to, 24);
1724 __ subcc(count, 8 << shift, count);
1725 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_fill_32_bytes_loop);
1726 __ delayed()->add(to, 32, to);
1728 __ BIND(L_check_fill_8_bytes);
1729 __ addcc(count, 8 << shift, count);
1730 __ brx(Assembler::zero, false, Assembler::pn, L_exit);
1731 __ delayed()->subcc(count, 1 << (shift + 1), count);
1732 __ brx(Assembler::less, false, Assembler::pn, L_fill_4_bytes);
1733 __ delayed()->andcc(count, 1<<shift, G0);
1735 //
1736 // length is too short, just fill 8 bytes at a time
1737 //
1738 Label L_fill_8_bytes_loop;
1739 __ BIND(L_fill_8_bytes_loop);
1740 __ stx(value, to, 0);
1741 __ subcc(count, 1 << (shift + 1), count);
1742 __ brx(Assembler::greaterEqual, false, Assembler::pn, L_fill_8_bytes_loop);
1743 __ delayed()->add(to, 8, to);
1745 // fill trailing 4 bytes
1746 __ andcc(count, 1<<shift, G0); // in delay slot of branches
1747 if (t == T_INT) {
1748 __ BIND(L_fill_elements);
1749 }
1750 __ BIND(L_fill_4_bytes);
1751 __ brx(Assembler::zero, false, Assembler::pt, L_fill_2_bytes);
1752 if (t == T_BYTE || t == T_SHORT) {
1753 __ delayed()->andcc(count, 1<<(shift-1), G0);
1754 } else {
1755 __ delayed()->nop();
1756 }
1757 __ stw(value, to, 0);
1758 if (t == T_BYTE || t == T_SHORT) {
1759 __ inc(to, 4);
1760 // fill trailing 2 bytes
1761 __ andcc(count, 1<<(shift-1), G0); // in delay slot of branches
1762 __ BIND(L_fill_2_bytes);
1763 __ brx(Assembler::zero, false, Assembler::pt, L_fill_byte);
1764 __ delayed()->andcc(count, 1, count);
1765 __ sth(value, to, 0);
1766 if (t == T_BYTE) {
1767 __ inc(to, 2);
1768 // fill trailing byte
1769 __ andcc(count, 1, count); // in delay slot of branches
1770 __ BIND(L_fill_byte);
1771 __ brx(Assembler::zero, false, Assembler::pt, L_exit);
1772 __ delayed()->nop();
1773 __ stb(value, to, 0);
1774 } else {
1775 __ BIND(L_fill_byte);
1776 }
1777 } else {
1778 __ BIND(L_fill_2_bytes);
1779 }
1780 __ BIND(L_exit);
1781 __ retl();
1782 __ delayed()->nop();
1784 // Handle fills of less than 8 bytes by single elements. Int is handled elsewhere.
1785 if (t == T_BYTE) {
1786 __ BIND(L_fill_elements);
1787 Label L_fill_2, L_fill_4;
1788 // in delay slot __ andcc(count, 1, G0);
1789 __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
1790 __ delayed()->andcc(count, 2, G0);
1791 __ stb(value, to, 0);
1792 __ inc(to, 1);
1793 __ BIND(L_fill_2);
1794 __ brx(Assembler::zero, false, Assembler::pt, L_fill_4);
1795 __ delayed()->andcc(count, 4, G0);
1796 __ stb(value, to, 0);
1797 __ stb(value, to, 1);
1798 __ inc(to, 2);
1799 __ BIND(L_fill_4);
1800 __ brx(Assembler::zero, false, Assembler::pt, L_exit);
1801 __ delayed()->nop();
1802 __ stb(value, to, 0);
1803 __ stb(value, to, 1);
1804 __ stb(value, to, 2);
1805 __ retl();
1806 __ delayed()->stb(value, to, 3);
1807 }
1809 if (t == T_SHORT) {
1810 Label L_fill_2;
1811 __ BIND(L_fill_elements);
1812 // in delay slot __ andcc(count, 1, G0);
1813 __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
1814 __ delayed()->andcc(count, 2, G0);
1815 __ sth(value, to, 0);
1816 __ inc(to, 2);
1817 __ BIND(L_fill_2);
1818 __ brx(Assembler::zero, false, Assembler::pt, L_exit);
1819 __ delayed()->nop();
1820 __ sth(value, to, 0);
1821 __ retl();
1822 __ delayed()->sth(value, to, 2);
1823 }
1824 return start;
1825 }
1827 //
1828 // Generate stub for conjoint short copy. If "aligned" is true, the
1829 // "from" and "to" addresses are assumed to be heapword aligned.
1830 //
1831 // Arguments for generated stub:
1832 // from: O0
1833 // to: O1
1834 // count: O2 treated as signed
1835 //
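// The effect of the stub below, as a C-like sketch (the real code aligns
// 'end_to' and unrolls; this shows only the backward direction):
//   jshort* end_from = from + count;  jshort* end_to = to + count;
//   while (count-- > 0)  *--end_to = *--end_from;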
1836 address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
1837 address *entry, const char *name) {
1838 // Do reverse copy.
1840 __ align(CodeEntryAlignment);
1841 StubCodeMark mark(this, "StubRoutines", name);
1842 address start = __ pc();
1844 Label L_skip_alignment, L_skip_alignment2, L_aligned_copy;
1845 Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
1847 const Register from = O0; // source array address
1848 const Register to = O1; // destination array address
1849 const Register count = O2; // elements count
1850 const Register end_from = from; // source array end address
1851 const Register end_to = to; // destination array end address
1853 const Register byte_count = O3; // bytes count to copy
1855 assert_clean_int(count, O3); // Make sure 'count' is clean int.
1857 if (entry != NULL) {
1858 *entry = __ pc();
1859 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1860 BLOCK_COMMENT("Entry:");
1861 }
1863 array_overlap_test(nooverlap_target, 1);
1865 __ sllx(count, LogBytesPerShort, byte_count);
1866 __ add(to, byte_count, end_to); // offset after last copied element
1868 // for short arrays, just do single element copy
1869 __ cmp(count, 11); // 8 + 3 (22 bytes)
1870 __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
1871 __ delayed()->add(from, byte_count, end_from);
1873 {
1874 // Align the ends of the arrays since they could be misaligned even
1875 // when the arrays themselves are aligned.
1877 // copy 1 element if necessary to align 'end_to' on a 4-byte boundary
1878 __ andcc(end_to, 3, G0);
1879 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1880 __ delayed()->lduh(end_from, -2, O3);
1881 __ dec(end_from, 2);
1882 __ dec(end_to, 2);
1883 __ dec(count);
1884 __ sth(O3, end_to, 0);
1885 __ BIND(L_skip_alignment);
1887 // copy 2 elements to align 'end_to' on an 8-byte boundary
1888 __ andcc(end_to, 7, G0);
1889 __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
1890 __ delayed()->lduh(end_from, -2, O3);
1891 __ dec(count, 2);
1892 __ lduh(end_from, -4, O4);
1893 __ dec(end_from, 4);
1894 __ dec(end_to, 4);
1895 __ sth(O3, end_to, 2);
1896 __ sth(O4, end_to, 0);
1897 __ BIND(L_skip_alignment2);
1898 }
1899 #ifdef _LP64
1900 if (aligned) {
1901 // Both arrays are aligned to 8 bytes in the 64-bit VM.
1902 // The 'count' is decremented in copy_16_bytes_backward_with_shift()
1903 // in unaligned case.
1904 __ dec(count, 8);
1905 } else
1906 #endif
1907 {
1908 // Copy 16 bytes per iteration with shift if the arrays do not have
1909 // the same alignment mod 8; otherwise jump to the code for aligned
1910 // copy below (subtracting 8 from 'count' before the jump).
1911 // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
1912 // Also jump over the aligned copy once the copy with shift completes.
1914 copy_16_bytes_backward_with_shift(end_from, end_to, count, 8,
1915 L_aligned_copy, L_copy_2_bytes);
1916 }
1917 // copy 4 elements (16 bytes) at a time
1918 __ align(OptoLoopAlignment);
1919 __ BIND(L_aligned_copy);
1920 __ dec(end_from, 16);
1921 __ ldx(end_from, 8, O3);
1922 __ ldx(end_from, 0, O4);
1923 __ dec(end_to, 16);
1924 __ deccc(count, 8);
1925 __ stx(O3, end_to, 8);
1926 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
1927 __ delayed()->stx(O4, end_to, 0);
1928 __ inc(count, 8);
1930 // copy 1 element (2 bytes) at a time
1931 __ BIND(L_copy_2_bytes);
1932 __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
1933 __ delayed()->nop();
1934 __ BIND(L_copy_2_bytes_loop);
1935 __ dec(end_from, 2);
1936 __ dec(end_to, 2);
1937 __ lduh(end_from, 0, O4);
1938 __ deccc(count);
1939 __ brx(Assembler::greater, false, Assembler::pt, L_copy_2_bytes_loop);
1940 __ delayed()->sth(O4, end_to, 0);
1942 __ BIND(L_exit);
1943 // O3, O4 are used as temp registers
1944 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
1945 __ retl();
1946 __ delayed()->mov(G0, O0); // return 0
1947 return start;
1948 }
1950 //
1951 // Generate core code for disjoint int copy (and oop copy on 32-bit).
1952 // If "aligned" is true, the "from" and "to" addresses are assumed
1953 // to be heapword aligned.
1954 //
1955 // Arguments:
1956 // from: O0
1957 // to: O1
1958 // count: O2 treated as signed
1959 //
1960 void generate_disjoint_int_copy_core(bool aligned) {
1962 Label L_skip_alignment, L_aligned_copy;
1963 Label L_copy_16_bytes, L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
1965 const Register from = O0; // source array address
1966 const Register to = O1; // destination array address
1967 const Register count = O2; // elements count
1968 const Register offset = O5; // offset from start of arrays
1969 // O3, O4, G3, G4 are used as temp registers
1971 // 'aligned' == true when it is known statically during compilation
1972 // of this arraycopy call site that both 'from' and 'to' addresses
1973 // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1974 //
1975 // Aligned arrays have 4-byte alignment in the 32-bit VM
1976 // and 8-byte alignment in the 64-bit VM.
1977 //
1978 #ifdef _LP64
1979 if (!aligned)
1980 #endif
1981 {
1982 // The next check could be put under 'ifndef' since the code in
1983 // generate_disjoint_long_copy_core() has its own checks and sets 'offset'.
1985 // for short arrays, just do single element copy
1986 __ cmp(count, 5); // 4 + 1 (20 bytes)
1987 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
1988 __ delayed()->mov(G0, offset);
1990 // copy 1 element to align 'to' on an 8 byte boundary
1991 __ andcc(to, 7, G0);
1992 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1993 __ delayed()->ld(from, 0, O3);
1994 __ inc(from, 4);
1995 __ inc(to, 4);
1996 __ dec(count);
1997 __ st(O3, to, -4);
1998 __ BIND(L_skip_alignment);
2000 // if arrays have same alignment mod 8, do 4 elements copy
2001 __ andcc(from, 7, G0);
2002 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
2003 __ delayed()->ld(from, 0, O3);
2005 //
2006 // Load 2 aligned 8-byte chunks and use one from the previous iteration
2007 // to form 2 aligned 8-byte chunks to store.
2008 //
2009 // copy_16_bytes_forward_with_shift() is not used here since this
2010 // code is more efficient.
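// One iteration of the shifted copy, as pseudocode (a sketch; O3 carries the
// previously loaded chunk, pointers shown before the inc(from/to, 16) bump):
//   O4 = *(jlong*)(from + 4);   G4 = *(jlong*)(from + 12);  // aligned loads
//   *(jlong*)(to + 0) = (O3 << 32) | (O4 >> 32);
//   *(jlong*)(to + 8) = (O4 << 32) | (G4 >> 32);
//   O3 = G4;  from += 16;  to += 16;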
2012 // copy with shift 4 elements (16 bytes) at a time
2013 __ dec(count, 4); // The cmp at the beginning guarantees count >= 4
2015 __ align(OptoLoopAlignment);
2016 __ BIND(L_copy_16_bytes);
2017 __ ldx(from, 4, O4);
2018 __ deccc(count, 4); // Can we do next iteration after this one?
2019 __ ldx(from, 12, G4);
2020 __ inc(to, 16);
2021 __ inc(from, 16);
2022 __ sllx(O3, 32, O3);
2023 __ srlx(O4, 32, G3);
2024 __ bset(G3, O3);
2025 __ stx(O3, to, -16);
2026 __ sllx(O4, 32, O4);
2027 __ srlx(G4, 32, G3);
2028 __ bset(G3, O4);
2029 __ stx(O4, to, -8);
2030 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
2031 __ delayed()->mov(G4, O3);
2033 __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
2034 __ delayed()->inc(count, 4); // restore 'count'
2036 __ BIND(L_aligned_copy);
2037 }
2038 // copy 4 elements (16 bytes) at a time
2039 __ and3(count, 1, G4); // Save
2040 __ srl(count, 1, count);
2041 generate_disjoint_long_copy_core(aligned);
2042 __ mov(G4, count); // Restore
2044 // copy 1 element at a time
2045 __ BIND(L_copy_4_bytes);
2046 __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
2047 __ delayed()->nop();
2048 __ BIND(L_copy_4_bytes_loop);
2049 __ ld(from, offset, O3);
2050 __ deccc(count);
2051 __ st(O3, to, offset);
2052 __ brx(Assembler::notZero, false, Assembler::pt, L_copy_4_bytes_loop);
2053 __ delayed()->inc(offset, 4);
2054 __ BIND(L_exit);
2055 }
2057 //
2058 // Generate stub for disjoint int copy. If "aligned" is true, the
2059 // "from" and "to" addresses are assumed to be heapword aligned.
2060 //
2061 // Arguments for generated stub:
2062 // from: O0
2063 // to: O1
2064 // count: O2 treated as signed
2065 //
2066 address generate_disjoint_int_copy(bool aligned, address *entry, const char *name) {
2067 __ align(CodeEntryAlignment);
2068 StubCodeMark mark(this, "StubRoutines", name);
2069 address start = __ pc();
2071 const Register count = O2;
2072 assert_clean_int(count, O3); // Make sure 'count' is clean int.
2074 if (entry != NULL) {
2075 *entry = __ pc();
2076 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2077 BLOCK_COMMENT("Entry:");
2078 }
2080 generate_disjoint_int_copy_core(aligned);
2082 // O3, O4 are used as temp registers
2083 inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
2084 __ retl();
2085 __ delayed()->mov(G0, O0); // return 0
2086 return start;
2087 }
2089 //
2090 // Generate core code for conjoint int copy (and oop copy on 32-bit).
2091 // If "aligned" is true, the "from" and "to" addresses are assumed
2092 // to be heapword aligned.
2093 //
2094 // Arguments:
2095 // from: O0
2096 // to: O1
2097 // count: O2 treated as signed
2098 //
2099 void generate_conjoint_int_copy_core(bool aligned) {
2100 // Do reverse copy.
2102 Label L_skip_alignment, L_aligned_copy;
2103 Label L_copy_16_bytes, L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
2105 const Register from = O0; // source array address
2106 const Register to = O1; // destination array address
2107 const Register count = O2; // elements count
2108 const Register end_from = from; // source array end address
2109 const Register end_to = to; // destination array end address
2110 // O3, O4, O5, G3 are used as temp registers
2112 const Register byte_count = O3; // bytes count to copy
2114 __ sllx(count, LogBytesPerInt, byte_count);
2115 __ add(to, byte_count, end_to); // offset after last copied element
2117 __ cmp(count, 5); // for short arrays, just do single element copy
2118 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
2119 __ delayed()->add(from, byte_count, end_from);
2121 // copy 1 element to align 'to' on an 8 byte boundary
2122 __ andcc(end_to, 7, G0);
2123 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
2124 __ delayed()->nop();
2125 __ dec(count);
2126 __ dec(end_from, 4);
2127 __ dec(end_to, 4);
2128 __ ld(end_from, 0, O4);
2129 __ st(O4, end_to, 0);
2130 __ BIND(L_skip_alignment);
2132 // Check if 'end_from' and 'end_to' have the same alignment.
2133 __ andcc(end_from, 7, G0);
2134 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
2135 __ delayed()->dec(count, 4); // The cmp at the start guarantees count >= 4
2137 // copy with shift 4 elements (16 bytes) at a time
2138 //
2139 // Load 2 aligned 8-byte chunks and use one from the previous iteration
2140 // to form 2 aligned 8-byte chunks to store.
2141 //
2142 __ ldx(end_from, -4, O3);
2143 __ align(OptoLoopAlignment);
2144 __ BIND(L_copy_16_bytes);
2145 __ ldx(end_from, -12, O4);
2146 __ deccc(count, 4);
2147 __ ldx(end_from, -20, O5);
2148 __ dec(end_to, 16);
2149 __ dec(end_from, 16);
2150 __ srlx(O3, 32, O3);
2151 __ sllx(O4, 32, G3);
2152 __ bset(G3, O3);
2153 __ stx(O3, end_to, 8);
2154 __ srlx(O4, 32, O4);
2155 __ sllx(O5, 32, G3);
2156 __ bset(O4, G3);
2157 __ stx(G3, end_to, 0);
2158 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
2159 __ delayed()->mov(O5, O3);
2161 __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
2162 __ delayed()->inc(count, 4);
2164 // copy 4 elements (16 bytes) at a time
2165 __ align(OptoLoopAlignment);
2166 __ BIND(L_aligned_copy);
2167 __ dec(end_from, 16);
2168 __ ldx(end_from, 8, O3);
2169 __ ldx(end_from, 0, O4);
2170 __ dec(end_to, 16);
2171 __ deccc(count, 4);
2172 __ stx(O3, end_to, 8);
2173 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
2174 __ delayed()->stx(O4, end_to, 0);
2175 __ inc(count, 4);
2177 // copy 1 element (4 bytes) at a time
2178 __ BIND(L_copy_4_bytes);
2179 __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
2180 __ delayed()->nop();
2181 __ BIND(L_copy_4_bytes_loop);
2182 __ dec(end_from, 4);
2183 __ dec(end_to, 4);
2184 __ ld(end_from, 0, O4);
2185 __ deccc(count);
2186 __ brx(Assembler::greater, false, Assembler::pt, L_copy_4_bytes_loop);
2187 __ delayed()->st(O4, end_to, 0);
2188 __ BIND(L_exit);
2189 }
2191 //
2192 // Generate stub for conjoint int copy. If "aligned" is true, the
2193 // "from" and "to" addresses are assumed to be heapword aligned.
2194 //
2195 // Arguments for generated stub:
2196 // from: O0
2197 // to: O1
2198 // count: O2 treated as signed
2199 //
2200 address generate_conjoint_int_copy(bool aligned, address nooverlap_target,
2201 address *entry, const char *name) {
2202 __ align(CodeEntryAlignment);
2203 StubCodeMark mark(this, "StubRoutines", name);
2204 address start = __ pc();
2206 assert_clean_int(O2, O3); // Make sure 'count' is clean int.
2208 if (entry != NULL) {
2209 *entry = __ pc();
2210 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2211 BLOCK_COMMENT("Entry:");
2212 }
2214 array_overlap_test(nooverlap_target, 2);
2216 generate_conjoint_int_copy_core(aligned);
2218 // O3, O4 are used as temp registers
2219 inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
2220 __ retl();
2221 __ delayed()->mov(G0, O0); // return 0
2222 return start;
2223 }
2225 //
2226 // Generate core code for disjoint long copy (and oop copy on 64-bit).
2227 // "aligned" is ignored, because we must make the stronger
2228 // assumption that both addresses are always 64-bit aligned.
2229 //
2230 // Arguments:
2231 // from: O0
2232 // to: O1
2233 // count: O2 treated as signed
2234 //
2235 // count -= 2;
2236 // if ( count >= 0 ) { // >= 2 elements
2237 // if ( count >= 6 ) { // >= 8 elements
2238 // count -= 6; // original count - 8
2239 // do {
2240 // copy_8_elements;
2241 // count -= 8;
2242 // } while ( count >= 0 );
2243 // count += 6;
2244 // }
2245 // if ( count >= 0 ) { // >= 2 elements
2246 // do {
2247 // copy_2_elements;
2248 // } while ( (count=count-2) >= 0 );
2249 // }
2250 // }
2251 // count += 2;
2252 // if ( count != 0 ) { // 1 element left
2253 // copy_1_element;
2254 // }
2255 //
2256 void generate_disjoint_long_copy_core(bool aligned) {
2257 Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
2258 const Register from = O0; // source array address
2259 const Register to = O1; // destination array address
2260 const Register count = O2; // elements count
2261 const Register offset0 = O4; // element offset
2262 const Register offset8 = O5; // next element offset
2264 __ deccc(count, 2);
2265 __ mov(G0, offset0); // offset from start of arrays (0)
2266 __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
2267 __ delayed()->add(offset0, 8, offset8);
2269 // Copy in 64-byte chunks
2270 Label L_copy_64_bytes;
2271 const Register from64 = O3; // source address
2272 const Register to64 = G3; // destination address
2273 __ subcc(count, 6, O3);
2274 __ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes );
2275 __ delayed()->mov(to, to64);
2276 // Now we can use O4(offset0), O5(offset8) as temps
2277 __ mov(O3, count);
2278 __ mov(from, from64);
2280 __ align(OptoLoopAlignment);
2281 __ BIND(L_copy_64_bytes);
2282 for( int off = 0; off < 64; off += 16 ) {
2283 __ ldx(from64, off+0, O4);
2284 __ ldx(from64, off+8, O5);
2285 __ stx(O4, to64, off+0);
2286 __ stx(O5, to64, off+8);
2287 }
2288 __ deccc(count, 8);
2289 __ inc(from64, 64);
2290 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_64_bytes);
2291 __ delayed()->inc(to64, 64);
2293 // Restore O4(offset0), O5(offset8)
2294 __ sub(from64, from, offset0);
2295 __ inccc(count, 6);
2296 __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
2297 __ delayed()->add(offset0, 8, offset8);
2299 // Copy in 16-byte chunks
2300 __ align(OptoLoopAlignment);
2301 __ BIND(L_copy_16_bytes);
2302 __ ldx(from, offset0, O3);
2303 __ ldx(from, offset8, G3);
2304 __ deccc(count, 2);
2305 __ stx(O3, to, offset0);
2306 __ inc(offset0, 16);
2307 __ stx(G3, to, offset8);
2308 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
2309 __ delayed()->inc(offset8, 16);
2311 // Copy last 8 bytes
2312 __ BIND(L_copy_8_bytes);
2313 __ inccc(count, 2);
2314 __ brx(Assembler::zero, true, Assembler::pn, L_exit );
2315 __ delayed()->mov(offset0, offset8); // Set O5 used by other stubs
2316 __ ldx(from, offset0, O3);
2317 __ stx(O3, to, offset0);
2318 __ BIND(L_exit);
2319 }
2321 //
2322 // Generate stub for disjoint long copy.
2323 // "aligned" is ignored, because we must make the stronger
2324 // assumption that both addresses are always 64-bit aligned.
2325 //
2326 // Arguments for generated stub:
2327 // from: O0
2328 // to: O1
2329 // count: O2 treated as signed
2330 //
2331 address generate_disjoint_long_copy(bool aligned, address *entry, const char *name) {
2332 __ align(CodeEntryAlignment);
2333 StubCodeMark mark(this, "StubRoutines", name);
2334 address start = __ pc();
2336 assert_clean_int(O2, O3); // Make sure 'count' is clean int.
2338 if (entry != NULL) {
2339 *entry = __ pc();
2340 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2341 BLOCK_COMMENT("Entry:");
2342 }
2344 generate_disjoint_long_copy_core(aligned);
2346 // O3, O4 are used as temp registers
2347 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
2348 __ retl();
2349 __ delayed()->mov(G0, O0); // return 0
2350 return start;
2351 }
2353 //
2354 // Generate core code for conjoint long copy (and oop copy on 64-bit).
2355 // "aligned" is ignored, because we must make the stronger
2356 // assumption that both addresses are always 64-bit aligned.
2357 //
2358 // Arguments:
2359 // from: O0
2360 // to: O1
2361 // count: O2 treated as signed
2362 //
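// Effectively a backward copy, unrolled two longs per iteration with one
// optional trailing element (a C-like sketch of the effect only):
//   for (intptr_t i = count - 1; i >= 0; i--)
//     ((jlong*)to)[i] = ((jlong*)from)[i];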
2363 void generate_conjoint_long_copy_core(bool aligned) {
2364 // Do reverse copy.
2365 Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
2366 const Register from = O0; // source array address
2367 const Register to = O1; // destination array address
2368 const Register count = O2; // elements count
2369 const Register offset8 = O4; // element offset
2370 const Register offset0 = O5; // previous element offset
2372 __ subcc(count, 1, count);
2373 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes );
2374 __ delayed()->sllx(count, LogBytesPerLong, offset8);
2375 __ sub(offset8, 8, offset0);
2376 __ align(OptoLoopAlignment);
2377 __ BIND(L_copy_16_bytes);
2378 __ ldx(from, offset8, O2);
2379 __ ldx(from, offset0, O3);
2380 __ stx(O2, to, offset8);
2381 __ deccc(offset8, 16); // use offset8 as counter
2382 __ stx(O3, to, offset0);
2383 __ brx(Assembler::greater, false, Assembler::pt, L_copy_16_bytes);
2384 __ delayed()->dec(offset0, 16);
2386 __ BIND(L_copy_8_bytes);
2387 __ brx(Assembler::negative, false, Assembler::pn, L_exit );
2388 __ delayed()->nop();
2389 __ ldx(from, 0, O3);
2390 __ stx(O3, to, 0);
2391 __ BIND(L_exit);
2392 }
2394 // Generate stub for conjoint long copy.
2395 // "aligned" is ignored, because we must make the stronger
2396 // assumption that both addresses are always 64-bit aligned.
2397 //
2398 // Arguments for generated stub:
2399 // from: O0
2400 // to: O1
2401 // count: O2 treated as signed
2402 //
2403 address generate_conjoint_long_copy(bool aligned, address nooverlap_target,
2404 address *entry, const char *name) {
2405 __ align(CodeEntryAlignment);
2406 StubCodeMark mark(this, "StubRoutines", name);
2407 address start = __ pc();
2409 assert(!aligned, "usage");
2411 assert_clean_int(O2, O3); // Make sure 'count' is clean int.
2413 if (entry != NULL) {
2414 *entry = __ pc();
2415 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2416 BLOCK_COMMENT("Entry:");
2417 }
2419 array_overlap_test(nooverlap_target, 3);
2421 generate_conjoint_long_copy_core(aligned);
2423 // O3, O4 are used as temp registers
2424 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
2425 __ retl();
2426 __ delayed()->mov(G0, O0); // return 0
2427 return start;
2428 }
2430 // Generate stub for disjoint oop copy. If "aligned" is true, the
2431 // "from" and "to" addresses are assumed to be heapword aligned.
2432 //
2433 // Arguments for generated stub:
2434 // from: O0
2435 // to: O1
2436 // count: O2 treated as signed
2437 //
2438 address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name) {
2440 const Register from = O0; // source array address
2441 const Register to = O1; // destination array address
2442 const Register count = O2; // elements count
2444 __ align(CodeEntryAlignment);
2445 StubCodeMark mark(this, "StubRoutines", name);
2446 address start = __ pc();
2448 assert_clean_int(count, O3); // Make sure 'count' is clean int.
2450 if (entry != NULL) {
2451 *entry = __ pc();
2452 // caller can pass a 64-bit byte count here
2453 BLOCK_COMMENT("Entry:");
2454 }
2456 // save arguments for barrier generation
2457 __ mov(to, G1);
2458 __ mov(count, G5);
2459 gen_write_ref_array_pre_barrier(G1, G5);
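// Note: the pre-barrier is emitted after the '*entry' point published above,
// so chained calls that enter there (e.g. from the generic arraycopy stub)
// still execute the barrier before any oops are copied.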
2460 #ifdef _LP64
2461 assert_clean_int(count, O3); // Make sure 'count' is clean int.
2462 if (UseCompressedOops) {
2463 generate_disjoint_int_copy_core(aligned);
2464 } else {
2465 generate_disjoint_long_copy_core(aligned);
2466 }
2467 #else
2468 generate_disjoint_int_copy_core(aligned);
2469 #endif
2470 // O0 is used as temp register
2471 gen_write_ref_array_post_barrier(G1, G5, O0);
2473 // O3, O4 are used as temp registers
2474 inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2475 __ retl();
2476 __ delayed()->mov(G0, O0); // return 0
2477 return start;
2478 }
2480 // Generate stub for conjoint oop copy. If "aligned" is true, the
2481 // "from" and "to" addresses are assumed to be heapword aligned.
2482 //
2483 // Arguments for generated stub:
2484 // from: O0
2485 // to: O1
2486 // count: O2 treated as signed
2487 //
2488 address generate_conjoint_oop_copy(bool aligned, address nooverlap_target,
2489 address *entry, const char *name) {
2491 const Register from = O0; // source array address
2492 const Register to = O1; // destination array address
2493 const Register count = O2; // elements count
2495 __ align(CodeEntryAlignment);
2496 StubCodeMark mark(this, "StubRoutines", name);
2497 address start = __ pc();
2499 assert_clean_int(count, O3); // Make sure 'count' is clean int.
2501 if (entry != NULL) {
2502 *entry = __ pc();
2503 // caller can pass a 64-bit byte count here
2504 BLOCK_COMMENT("Entry:");
2505 }
2507 array_overlap_test(nooverlap_target, LogBytesPerHeapOop);
2509 // save arguments for barrier generation
2510 __ mov(to, G1);
2511 __ mov(count, G5);
2512 gen_write_ref_array_pre_barrier(G1, G5);
2514 #ifdef _LP64
2515 if (UseCompressedOops) {
2516 generate_conjoint_int_copy_core(aligned);
2517 } else {
2518 generate_conjoint_long_copy_core(aligned);
2519 }
2520 #else
2521 generate_conjoint_int_copy_core(aligned);
2522 #endif
2524 // O0 is used as temp register
2525 gen_write_ref_array_post_barrier(G1, G5, O0);
2527 // O3, O4 are used as temp registers
2528 inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2529 __ retl();
2530 __ delayed()->mov(G0, O0); // return 0
2531 return start;
2532 }
2535 // Helper for generating a dynamic type check.
2536 // Smashes only the given temp registers.
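// Control flow, in pseudocode (a sketch of the checks emitted below):
//   if (fast_path_subtype_check(sub_klass, super_klass)) goto L_success;
//   save_frame(0);
//   if (slow_path_subtype_check(sub_klass, super_klass)) { restore(); goto L_success; }
//   restore();   // fall through to L_miss on failure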
2537 void generate_type_check(Register sub_klass,
2538 Register super_check_offset,
2539 Register super_klass,
2540 Register temp,
2541 Label& L_success) {
2542 assert_different_registers(sub_klass, super_check_offset, super_klass, temp);
2544 BLOCK_COMMENT("type_check:");
2546 Label L_miss, L_pop_to_miss;
2548 assert_clean_int(super_check_offset, temp);
2550 __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, noreg,
2551 &L_success, &L_miss, NULL,
2552 super_check_offset);
2554 BLOCK_COMMENT("type_check_slow_path:");
2555 __ save_frame(0);
2556 __ check_klass_subtype_slow_path(sub_klass->after_save(),
2557 super_klass->after_save(),
2558 L0, L1, L2, L4,
2559 NULL, &L_pop_to_miss);
2560 __ ba(false, L_success);
2561 __ delayed()->restore();
2563 __ bind(L_pop_to_miss);
2564 __ restore();
2566 // Fall through on failure!
2567 __ BIND(L_miss);
2568 }
2571 // Generate stub for checked oop copy.
2572 //
2573 // Arguments for generated stub:
2574 // from: O0
2575 // to: O1
2576 // count: O2 treated as signed
2577 // ckoff: O3 (super_check_offset)
2578 // ckval: O4 (super_klass)
2579 // ret: O0 zero for success; (-1^K) where K is partial transfer count
2580 //
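// Example of the return convention (illustrative numbers): if 3 oops are
// stored before a type-check failure, the stub returns (-1 ^ 3) == ~3 == -4;
// the caller recovers the partial transfer count as K = ~O0.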
2581 address generate_checkcast_copy(const char *name, address *entry) {
2583 const Register O0_from = O0; // source array address
2584 const Register O1_to = O1; // destination array address
2585 const Register O2_count = O2; // elements count
2586 const Register O3_ckoff = O3; // super_check_offset
2587 const Register O4_ckval = O4; // super_klass
2589 const Register O5_offset = O5; // loop var, with stride wordSize
2590 const Register G1_remain = G1; // loop var, with stride -1
2591 const Register G3_oop = G3; // actual oop copied
2592 const Register G4_klass = G4; // oop._klass
2593 const Register G5_super = G5; // oop._klass._primary_supers[ckval]
2595 __ align(CodeEntryAlignment);
2596 StubCodeMark mark(this, "StubRoutines", name);
2597 address start = __ pc();
2599 #ifdef ASSERT
2600 // We sometimes save a frame (see generate_type_check above).
2601 // If this will cause trouble, let's fail now instead of later.
2602 __ save_frame(0);
2603 __ restore();
2604 #endif
2606 assert_clean_int(O2_count, G1); // Make sure 'count' is clean int.
2608 #ifdef ASSERT
2609 // caller guarantees that the arrays really are different
2610 // otherwise, we would have to make conjoint checks
2611 { Label L;
2612 __ mov(O3, G1); // spill: overlap test smashes O3
2613 __ mov(O4, G4); // spill: overlap test smashes O4
2614 array_overlap_test(L, LogBytesPerHeapOop);
2615 __ stop("checkcast_copy within a single array");
2616 __ bind(L);
2617 __ mov(G1, O3);
2618 __ mov(G4, O4);
2619 }
2620 #endif //ASSERT
2622 if (entry != NULL) {
2623 *entry = __ pc();
2624 // caller can pass a 64-bit byte count here (from generic stub)
2625 BLOCK_COMMENT("Entry:");
2626 }
2628 gen_write_ref_array_pre_barrier(O1_to, O2_count);
2630 Label load_element, store_element, do_card_marks, fail, done;
2631 __ addcc(O2_count, 0, G1_remain); // initialize loop index, and test it
2632 __ brx(Assembler::notZero, false, Assembler::pt, load_element);
2633 __ delayed()->mov(G0, O5_offset); // offset from start of arrays
2635 // Empty array: Nothing to do.
2636 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
2637 __ retl();
2638 __ delayed()->set(0, O0); // return 0 on (trivial) success
2640 // ======== begin loop ========
2641 // (Loop is rotated; its entry is load_element.)
2642 // Loop variables:
2643 // (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays
2644 // (O2 = len; O2 != 0; O2--) --- number of oops *remaining*
2645 // G3, G4, G5 --- current oop, oop.klass, oop.klass.super
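// The rotated loop is equivalent to (a sketch; a NULL oop skips the type
// check but is still stored):
//   for (O5 = 0, G1 = count; G1 != 0; G1--, O5 += heapOopSize) {
//     oop x = load_heap_oop(from + O5);
//     if (x != NULL && !subtype_check(x->klass(), ckoff, ckval)) goto fail;
//     store_heap_oop(x, to + O5);
//   }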
2646 __ align(OptoLoopAlignment);
2648 __ BIND(store_element);
2649 __ deccc(G1_remain); // decrement the count
2650 __ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop
2651 __ inc(O5_offset, heapOopSize); // step to next offset
2652 __ brx(Assembler::zero, true, Assembler::pt, do_card_marks);
2653 __ delayed()->set(0, O0); // return 0 on success
2655 // ======== loop entry is here ========
2656 __ BIND(load_element);
2657 __ load_heap_oop(O0_from, O5_offset, G3_oop); // load the oop
2658 __ br_null(G3_oop, true, Assembler::pt, store_element);
2659 __ delayed()->nop();
2661 __ load_klass(G3_oop, G4_klass); // query the object klass
2663 generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super,
2664 // branch to this on success:
2665 store_element);
2666 // ======== end loop ========
2668 // It was a real error; we must depend on the caller to finish the job.
2669 // Register G1 has number of *remaining* oops, O2 number of *total* oops.
2670 // Emit GC store barriers for the oops we have copied (O2 minus G1),
2671 // and report their number to the caller.
2672 __ BIND(fail);
2673 __ subcc(O2_count, G1_remain, O2_count);
2674 __ brx(Assembler::zero, false, Assembler::pt, done);
2675 __ delayed()->not1(O2_count, O0); // report (-1^K) to caller
2677 __ BIND(do_card_marks);
2678 gen_write_ref_array_post_barrier(O1_to, O2_count, O3); // store check on O1[0..O2]
2680 __ BIND(done);
2681 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
2682 __ retl();
2683 __ delayed()->nop(); // return value in O0
2685 return start;
2686 }
2689 // Generate 'unsafe' array copy stub
2690 // Though just as safe as the other stubs, it takes an unscaled
2691 // size_t argument instead of an element count.
2692 //
2693 // Arguments for generated stub:
2694 // from: O0
2695 // to: O1
2696 // count: O2 byte count, treated as ssize_t, can be zero
2697 //
2698 // Examines the alignment of the operands and dispatches
2699 // to a long, int, short, or byte copy loop.
2700 //
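// Dispatch, in pseudocode (a sketch of the branches below):
//   bits = from | to | byte_count;
//   if ((bits & 7) == 0)  return jlong_copy (from, to, byte_count >> 3);
//   if ((bits & 3) == 0)  return jint_copy  (from, to, byte_count >> 2);
//   if ((bits & 1) == 0)  return jshort_copy(from, to, byte_count >> 1);
//   return jbyte_copy(from, to, byte_count);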
2701 address generate_unsafe_copy(const char* name,
2702 address byte_copy_entry,
2703 address short_copy_entry,
2704 address int_copy_entry,
2705 address long_copy_entry) {
2707 const Register O0_from = O0; // source array address
2708 const Register O1_to = O1; // destination array address
2709 const Register O2_count = O2; // elements count
2711 const Register G1_bits = G1; // test copy of low bits
2713 __ align(CodeEntryAlignment);
2714 StubCodeMark mark(this, "StubRoutines", name);
2715 address start = __ pc();
2717 // bump this on entry, not on exit:
2718 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, G1, G3);
2720 __ or3(O0_from, O1_to, G1_bits);
2721 __ or3(O2_count, G1_bits, G1_bits);
2723 __ btst(BytesPerLong-1, G1_bits);
2724 __ br(Assembler::zero, true, Assembler::pt,
2725 long_copy_entry, relocInfo::runtime_call_type);
2726 // scale the count on the way out:
2727 __ delayed()->srax(O2_count, LogBytesPerLong, O2_count);
2729 __ btst(BytesPerInt-1, G1_bits);
2730 __ br(Assembler::zero, true, Assembler::pt,
2731 int_copy_entry, relocInfo::runtime_call_type);
2732 // scale the count on the way out:
2733 __ delayed()->srax(O2_count, LogBytesPerInt, O2_count);
2735 __ btst(BytesPerShort-1, G1_bits);
2736 __ br(Assembler::zero, true, Assembler::pt,
2737 short_copy_entry, relocInfo::runtime_call_type);
2738 // scale the count on the way out:
2739 __ delayed()->srax(O2_count, LogBytesPerShort, O2_count);
2741 __ br(Assembler::always, false, Assembler::pt,
2742 byte_copy_entry, relocInfo::runtime_call_type);
2743 __ delayed()->nop();
2745 return start;
2746 }
2749 // Perform range checks on the proposed arraycopy.
2750 // Kills the two temps, but nothing else.
2751 // Also, clean the sign bits of src_pos and dst_pos.
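// The checks amount to (a sketch; all values are known non-negative here):
//   if (src_pos + length > src->length())  goto L_failed;
//   if (dst_pos + length > dst->length())  goto L_failed;
//   src_pos = sign_extend32(src_pos);  dst_pos = sign_extend32(dst_pos);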
2752 void arraycopy_range_checks(Register src, // source array oop (O0)
2753 Register src_pos, // source position (O1)
2754 Register dst, // destination array oop (O2)
2755 Register dst_pos, // destination position (O3)
2756 Register length, // length of copy (O4)
2757 Register temp1, Register temp2,
2758 Label& L_failed) {
2759 BLOCK_COMMENT("arraycopy_range_checks:");
2761 // if (src_pos + length > arrayOop(src)->length() ) FAIL;
2763 const Register array_length = temp1; // scratch
2764 const Register end_pos = temp2; // scratch
2766 // Note: This next instruction may be in the delay slot of a branch:
2767 __ add(length, src_pos, end_pos); // src_pos + length
2768 __ lduw(src, arrayOopDesc::length_offset_in_bytes(), array_length);
2769 __ cmp(end_pos, array_length);
2770 __ br(Assembler::greater, false, Assembler::pn, L_failed);
2772 // if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
2773 __ delayed()->add(length, dst_pos, end_pos); // dst_pos + length
2774 __ lduw(dst, arrayOopDesc::length_offset_in_bytes(), array_length);
2775 __ cmp(end_pos, array_length);
2776 __ br(Assembler::greater, false, Assembler::pn, L_failed);
2778 // Have to clean up the high 32 bits of 'src_pos' and 'dst_pos'.
2779 // A move with sign extension can be used since they are positive.
2780 __ delayed()->signx(src_pos, src_pos);
2781 __ signx(dst_pos, dst_pos);
2783 BLOCK_COMMENT("arraycopy_range_checks done");
2784 }
2787 //
2788 // Generate generic array copy stubs
2789 //
2790 // Input:
2791 // O0 - src oop
2792 // O1 - src_pos
2793 // O2 - dst oop
2794 // O3 - dst_pos
2795 // O4 - element count
2796 //
2797 // Output:
2798 // O0 == 0 - success
2799 // O0 == -1 - need to call System.arraycopy
2800 //
2801 address generate_generic_copy(const char *name,
2802 address entry_jbyte_arraycopy,
2803 address entry_jshort_arraycopy,
2804 address entry_jint_arraycopy,
2805 address entry_oop_arraycopy,
2806 address entry_jlong_arraycopy,
2807 address entry_checkcast_arraycopy) {
2808 Label L_failed, L_objArray;
2810 // Input registers
2811 const Register src = O0; // source array oop
2812 const Register src_pos = O1; // source position
2813 const Register dst = O2; // destination array oop
2814 const Register dst_pos = O3; // destination position
2815 const Register length = O4; // elements count
2817 // registers used as temp
2818 const Register G3_src_klass = G3; // source array klass
2819 const Register G4_dst_klass = G4; // destination array klass
2820 const Register G5_lh = G5; // layout helper
2821 const Register O5_temp = O5;
2823 __ align(CodeEntryAlignment);
2824 StubCodeMark mark(this, "StubRoutines", name);
2825 address start = __ pc();
2827 // bump this on entry, not on exit:
2828 inc_counter_np(SharedRuntime::_generic_array_copy_ctr, G1, G3);
2830 // In principle, the int arguments could be dirty.
2831 //assert_clean_int(src_pos, G1);
2832 //assert_clean_int(dst_pos, G1);
2833 //assert_clean_int(length, G1);
2835 //-----------------------------------------------------------------------
2836 // Assembler stubs will be used for this call to arraycopy
2837 // if the following conditions are met:
2838 //
2839 // (1) src and dst must not be null.
2840 // (2) src_pos must not be negative.
2841 // (3) dst_pos must not be negative.
2842 // (4) length must not be negative.
2843 // (5) src klass and dst klass should be the same and not NULL.
2844 // (6) src and dst should be arrays.
2845 // (7) src_pos + length must not exceed length of src.
2846 // (8) dst_pos + length must not exceed length of dst.
2847 BLOCK_COMMENT("arraycopy initial argument checks");
2849 // if (src == NULL) return -1;
2850 __ br_null(src, false, Assembler::pn, L_failed);
2852 // if (src_pos < 0) return -1;
2853 __ delayed()->tst(src_pos);
2854 __ br(Assembler::negative, false, Assembler::pn, L_failed);
2855 __ delayed()->nop();
2857 // if (dst == NULL) return -1;
2858 __ br_null(dst, false, Assembler::pn, L_failed);
2860 // if (dst_pos < 0) return -1;
2861 __ delayed()->tst(dst_pos);
2862 __ br(Assembler::negative, false, Assembler::pn, L_failed);
2864 // if (length < 0) return -1;
2865 __ delayed()->tst(length);
2866 __ br(Assembler::negative, false, Assembler::pn, L_failed);
2868 BLOCK_COMMENT("arraycopy argument klass checks");
2869 // get src->klass()
2870 if (UseCompressedOops) {
2871 __ delayed()->nop(); // ??? not good
2872 __ load_klass(src, G3_src_klass);
2873 } else {
2874 __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass);
2875 }
2877 #ifdef ASSERT
2878 // assert(src->klass() != NULL);
2879 BLOCK_COMMENT("assert klasses not null");
2880 { Label L_a, L_b;
2881 __ br_notnull(G3_src_klass, false, Assembler::pt, L_b); // it is broken if klass is NULL
2882 __ delayed()->nop();
2883 __ bind(L_a);
2884 __ stop("broken null klass");
2885 __ bind(L_b);
2886 __ load_klass(dst, G4_dst_klass);
2887 __ br_null(G4_dst_klass, false, Assembler::pn, L_a); // this would be broken also
2888 __ delayed()->mov(G0, G4_dst_klass); // scribble the temp
2889 BLOCK_COMMENT("assert done");
2890 }
2891 #endif
2893 // Load layout helper
2894 //
2895 // |array_tag|     | header_size | element_type |     |log2_element_size|
2896 //  32        30    24            16             8     2                 0
2897 //
2898 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2899 //
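// Decoding sketch (the shifts/masks are the Klass::_lh_* constants used
// further down; shown here only as a reading aid):
//   array_tag   = lh >> _lh_array_tag_shift;    // 0x3 typeArray, 0x2 objArray
//   header_size = (lh >> _lh_header_size_shift) & _lh_header_size_mask;
//   log2_elsize = lh & _lh_log2_element_size_mask;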
2901 int lh_offset = klassOopDesc::header_size() * HeapWordSize +
2902 Klass::layout_helper_offset_in_bytes();
2904 // Load the 32-bit signed value. Use the br() instruction with it to check icc.
2905 __ lduw(G3_src_klass, lh_offset, G5_lh);
2907 if (UseCompressedOops) {
2908 __ load_klass(dst, G4_dst_klass);
2909 }
2910 // Handle objArrays completely differently...
2911 juint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2912 __ set(objArray_lh, O5_temp);
2913 __ cmp(G5_lh, O5_temp);
2914 __ br(Assembler::equal, false, Assembler::pt, L_objArray);
2915 if (UseCompressedOops) {
2916 __ delayed()->nop();
2917 } else {
2918 __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
2919 }
2921 // if (src->klass() != dst->klass()) return -1;
2922 __ cmp(G3_src_klass, G4_dst_klass);
2923 __ brx(Assembler::notEqual, false, Assembler::pn, L_failed);
2924 __ delayed()->nop();
2926 // if (!src->is_Array()) return -1;
2927 __ cmp(G5_lh, Klass::_lh_neutral_value); // < 0
2928 __ br(Assembler::greaterEqual, false, Assembler::pn, L_failed);
2930 // At this point, it is known to be a typeArray (array_tag 0x3).
2931 #ifdef ASSERT
2932 __ delayed()->nop();
2933 { Label L;
2934 jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2935 __ set(lh_prim_tag_in_place, O5_temp);
2936 __ cmp(G5_lh, O5_temp);
2937 __ br(Assembler::greaterEqual, false, Assembler::pt, L);
2938 __ delayed()->nop();
2939 __ stop("must be a primitive array");
2940 __ bind(L);
2941 }
2942 #else
2943 __ delayed(); // match next insn to prev branch
2944 #endif
2946 arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2947 O5_temp, G4_dst_klass, L_failed);
2949 // typeArrayKlass
2950 //
2951 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2952 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2953 //
2955 const Register G4_offset = G4_dst_klass; // array offset
2956 const Register G3_elsize = G3_src_klass; // log2 element size
2958 __ srl(G5_lh, Klass::_lh_header_size_shift, G4_offset);
2959 __ and3(G4_offset, Klass::_lh_header_size_mask, G4_offset); // array_offset
2960 __ add(src, G4_offset, src); // src array offset
2961 __ add(dst, G4_offset, dst); // dst array offset
2962 __ and3(G5_lh, Klass::_lh_log2_element_size_mask, G3_elsize); // log2 element size
2964 // next registers should be set before the jump to corresponding stub
2965 const Register from = O0; // source array address
2966 const Register to = O1; // destination array address
2967 const Register count = O2; // elements count
2969 // 'from', 'to', 'count' registers should be set in this order
2970 // since they are the same as 'src', 'src_pos', 'dst'.
2972 BLOCK_COMMENT("scale indexes to element size");
2973 __ sll_ptr(src_pos, G3_elsize, src_pos);
2974 __ sll_ptr(dst_pos, G3_elsize, dst_pos);
2975 __ add(src, src_pos, from); // src_addr
2976 __ add(dst, dst_pos, to); // dst_addr
2978 BLOCK_COMMENT("choose copy loop based on element size");
2979 __ cmp(G3_elsize, 0);
2980 __ br(Assembler::equal, true, Assembler::pt, entry_jbyte_arraycopy);
2981 __ delayed()->signx(length, count); // length
2983 __ cmp(G3_elsize, LogBytesPerShort);
2984 __ br(Assembler::equal, true, Assembler::pt, entry_jshort_arraycopy);
2985 __ delayed()->signx(length, count); // length
2987 __ cmp(G3_elsize, LogBytesPerInt);
2988 __ br(Assembler::equal, true, Assembler::pt, entry_jint_arraycopy);
2989 __ delayed()->signx(length, count); // length
2990 #ifdef ASSERT
2991 { Label L;
2992 __ cmp(G3_elsize, LogBytesPerLong);
2993 __ br(Assembler::equal, false, Assembler::pt, L);
2994 __ delayed()->nop();
2995 __ stop("must be long copy, but elsize is wrong");
2996 __ bind(L);
2997 }
2998 #endif
2999 __ br(Assembler::always, false, Assembler::pt, entry_jlong_arraycopy);
3000 __ delayed()->signx(length, count); // length
3002 // objArrayKlass
3003 __ BIND(L_objArray);
3004 // live at this point: G3_src_klass, G4_dst_klass, src[_pos], dst[_pos], length
3006 Label L_plain_copy, L_checkcast_copy;
3007 // test array classes for subtyping
3008 __ cmp(G3_src_klass, G4_dst_klass); // usual case is exact equality
3009 __ brx(Assembler::notEqual, true, Assembler::pn, L_checkcast_copy);
3010 __ delayed()->lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted from below
3012 // Identically typed arrays can be copied without element-wise checks.
3013 arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
3014 O5_temp, G5_lh, L_failed);
3016 __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
3017 __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
3018 __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
3019 __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
3020 __ add(src, src_pos, from); // src_addr
3021 __ add(dst, dst_pos, to); // dst_addr
3022 __ BIND(L_plain_copy);
3023 __ br(Assembler::always, false, Assembler::pt, entry_oop_arraycopy);
3024 __ delayed()->signx(length, count); // length
3026 __ BIND(L_checkcast_copy);
3027 // live at this point: G3_src_klass, G4_dst_klass
3028 {
3029 // Before looking at dst.length, make sure dst is also an objArray.
3030 // lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted to delay slot
3031 __ cmp(G5_lh, O5_temp);
3032 __ br(Assembler::notEqual, false, Assembler::pn, L_failed);
3034 // It is safe to examine both src.length and dst.length.
3035 __ delayed(); // match next insn to prev branch
3036 arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
3037 O5_temp, G5_lh, L_failed);
3039 // Marshal the base address arguments now, freeing registers.
3040 __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
3041 __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
3042 __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
3043 __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
3044 __ add(src, src_pos, from); // src_addr
3045 __ add(dst, dst_pos, to); // dst_addr
3046 __ signx(length, count); // length (reloaded)
3048 Register sco_temp = O3; // this register is free now
3049 assert_different_registers(from, to, count, sco_temp,
3050 G4_dst_klass, G3_src_klass);
3052 // Generate the type check.
3053 int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
3054 Klass::super_check_offset_offset_in_bytes());
3055 __ lduw(G4_dst_klass, sco_offset, sco_temp);
3056 generate_type_check(G3_src_klass, sco_temp, G4_dst_klass,
3057 O5_temp, L_plain_copy);
3059 // Fetch destination element klass from the objArrayKlass header.
3060 int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
3061 objArrayKlass::element_klass_offset_in_bytes());
3063 // the checkcast_copy loop needs two extra arguments:
3064 __ ld_ptr(G4_dst_klass, ek_offset, O4); // dest elem klass
3065 // lduw(O4, sco_offset, O3); // sco of elem klass
3067 __ br(Assembler::always, false, Assembler::pt, entry_checkcast_arraycopy);
3068 __ delayed()->lduw(O4, sco_offset, O3);
3069 }
3071 __ BIND(L_failed);
3072 __ retl();
3073 __ delayed()->sub(G0, 1, O0); // return -1
3074 return start;
3075 }
3077 void generate_arraycopy_stubs() {
3078 address entry;
3079 address entry_jbyte_arraycopy;
3080 address entry_jshort_arraycopy;
3081 address entry_jint_arraycopy;
3082 address entry_oop_arraycopy;
3083 address entry_jlong_arraycopy;
3084 address entry_checkcast_arraycopy;
3086 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry,
3087 "jbyte_disjoint_arraycopy");
3088 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,
3089 "jbyte_arraycopy");
3090 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
3091 "jshort_disjoint_arraycopy");
3092 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
3093 "jshort_arraycopy");
3094 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, &entry,
3095 "jint_disjoint_arraycopy");
3096 StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, entry, &entry_jint_arraycopy,
3097 "jint_arraycopy");
3098 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(false, &entry,
3099 "jlong_disjoint_arraycopy");
3100 StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy(false, entry, &entry_jlong_arraycopy,
3101 "jlong_arraycopy");
3102 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, &entry,
3103 "oop_disjoint_arraycopy");
3104 StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy,
3105 "oop_arraycopy");
3108 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry,
3109 "arrayof_jbyte_disjoint_arraycopy");
3110 StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, NULL,
3111 "arrayof_jbyte_arraycopy");
3113 StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry,
3114 "arrayof_jshort_disjoint_arraycopy");
3115 StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, NULL,
3116 "arrayof_jshort_arraycopy");
3118 StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry,
3119 "arrayof_jint_disjoint_arraycopy");
3120 #ifdef _LP64
3121 // since sizeof(jint) < sizeof(HeapWord), there's a different flavor:
3122 StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, entry, NULL, "arrayof_jint_arraycopy");
3123 #else
3124 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy;
3125 #endif
3127 StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, NULL,
3128 "arrayof_jlong_disjoint_arraycopy");
3129 StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(true, NULL,
3130 "arrayof_oop_disjoint_arraycopy");
3132 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;
3133 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;
3135 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
3136 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
3137 entry_jbyte_arraycopy,
3138 entry_jshort_arraycopy,
3139 entry_jint_arraycopy,
3140 entry_jlong_arraycopy);
3141 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
3142 entry_jbyte_arraycopy,
3143 entry_jshort_arraycopy,
3144 entry_jint_arraycopy,
3145 entry_oop_arraycopy,
3146 entry_jlong_arraycopy,
3147 entry_checkcast_arraycopy);
3149 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
3150 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
3151 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
3152 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
3153 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
3154 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
3155 }
3157 void generate_initial() {
3158 // Generates the initial stubs and initializes the entry points
3160 //------------------------------------------------------------------------------------------------------------------------
3161 // entry points that exist in all platforms
3162 // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
3163 // the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
3164 StubRoutines::_forward_exception_entry = generate_forward_exception();
3166 StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address);
3167 StubRoutines::_catch_exception_entry = generate_catch_exception();
3169 //------------------------------------------------------------------------------------------------------------------------
3170 // entry points that are platform specific
3171 StubRoutines::Sparc::_test_stop_entry = generate_test_stop();
3173 StubRoutines::Sparc::_stop_subroutine_entry = generate_stop_subroutine();
3174 StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows();
3176 #if !defined(COMPILER2) && !defined(_LP64)
3177 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
3178 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
3179 StubRoutines::_atomic_add_entry = generate_atomic_add();
3180 StubRoutines::_atomic_xchg_ptr_entry = StubRoutines::_atomic_xchg_entry;
3181 StubRoutines::_atomic_cmpxchg_ptr_entry = StubRoutines::_atomic_cmpxchg_entry;
3182 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
3183 StubRoutines::_atomic_add_ptr_entry = StubRoutines::_atomic_add_entry;
3184 #endif // !COMPILER2 && !_LP64
3185 }
3188 void generate_all() {
3189 // Generates all stubs and initializes the entry points
3191 // Generate partial_subtype_check first here since its code depends on
3192 // UseZeroBaseCompressedOops which is defined after heap initialization.
3193 StubRoutines::Sparc::_partial_subtype_check = generate_partial_subtype_check();
3194 // These entry points require SharedInfo::stack0 to be set up in non-core builds
3195 StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
3196 StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
3197 StubRoutines::_throw_ArithmeticException_entry = generate_throw_exception("ArithmeticException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_ArithmeticException), true);
3198 StubRoutines::_throw_NullPointerException_entry = generate_throw_exception("NullPointerException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException), true);
3199 StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
3200 StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
3202 StubRoutines::_handler_for_unsafe_access_entry =
3203 generate_handler_for_unsafe_access();
3205 // support for verify_oop (must happen after universe_init)
3206 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine();
3208 // arraycopy stubs used by compilers
3209 generate_arraycopy_stubs();
3211 // Don't initialize the platform math functions since sparc
3212 // doesn't have intrinsics for these operations.
3213 }
3216 public:
3217 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
3218 // replace the standard masm with a special one:
3219 _masm = new MacroAssembler(code);
3221 _stub_count = !all ? 0x100 : 0x200;
3222 if (all) {
3223 generate_all();
3224 } else {
3225 generate_initial();
3226 }
3228 // make sure this stub is available for all local calls
3229 if (_atomic_add_stub.is_unbound()) {
3230 // generate a second time, if necessary
3231 (void) generate_atomic_add();
3232 }
3233 }
3236 private:
3237 int _stub_count;
3238 void stub_prolog(StubCodeDesc* cdesc) {
3239 # ifdef ASSERT
3240 // put extra information in the stub code, to make it more readable
3241 #ifdef _LP64
3242 // Write the high part of the address
3243 // [RGV] Check if there is a dependency on the size of this prolog
3244 __ emit_data((intptr_t)cdesc >> 32, relocInfo::none);
3245 #endif
3246 __ emit_data((intptr_t)cdesc, relocInfo::none);
3247 __ emit_data(++_stub_count, relocInfo::none);
3248 # endif
3249 align(true);
3250 }
3252 void align(bool at_header = false) {
3253 // %%%%% move this constant somewhere else
3254 // UltraSPARC cache line size is 8 instructions:
3255 const unsigned int icache_line_size = 32;
3256 const unsigned int icache_half_line_size = 16;
3258 if (at_header) {
3259 while ((intptr_t)(__ pc()) % icache_line_size != 0) {
3260 __ emit_data(0, relocInfo::none);
3261 }
3262 } else {
3263 while ((intptr_t)(__ pc()) % icache_half_line_size != 0) {
3264 __ nop();
3265 }
3266 }
3267 }
3269 }; // end class declaration
3271 void StubGenerator_generate(CodeBuffer* code, bool all) {
3272 StubGenerator g(code, all);
3273 }