Wed, 08 Apr 2009 10:56:49 -0700
6655638: dynamic languages need method handles
Summary: initial implementation, with known omissions (x86/64, sparc, compiler optim., c-oops, C++ interp.)
Reviewed-by: kvn, twisti, never
1 /*
2 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_stubGenerator_sparc.cpp.incl"
28 // Declaration and definition of StubGenerator (no .hpp file).
29 // For a more detailed description of the stub routine structure
30 // see the comment in stubRoutines.hpp.
32 #define __ _masm->
34 #ifdef PRODUCT
35 #define BLOCK_COMMENT(str) /* nothing */
36 #else
37 #define BLOCK_COMMENT(str) __ block_comment(str)
38 #endif
40 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
42 // Note: The register L7 is used as L7_thread_cache, and may not be used
43 // any other way within this module.
46 static const Register& Lstub_temp = L2;
48 // -------------------------------------------------------------------------------------------------------------------------
49 // Stub Code definitions
51 static address handle_unsafe_access() {
52 JavaThread* thread = JavaThread::current();
53 address pc = thread->saved_exception_pc();
54 address npc = thread->saved_exception_npc();
55 // pc points to the instruction which we must emulate;
56 // doing a no-op is fine: the load simply returns garbage
58 // request an async exception
59 thread->set_pending_unsafe_access_error();
61 // return address of next instruction to execute
62 return npc;
63 }
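// (Flow sketch, an assumption based on the surrounding code: the low-level
// signal handler records the faulting pc/npc in the JavaThread and redirects
// execution to the handler_for_unsafe_access stub below, which calls this
// function and resumes at the returned npc -- the faulting load is thus
// skipped and its result is garbage.)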
65 class StubGenerator: public StubCodeGenerator {
66 private:
68 #ifdef PRODUCT
69 #define inc_counter_np(a,b,c) (0)
70 #else
71 void inc_counter_np_(int& counter, Register t1, Register t2) {
72 Address counter_addr(t2, (address) &counter);
73 __ sethi(counter_addr);
74 __ ld(counter_addr, t1);
75 __ inc(t1);
76 __ st(t1, counter_addr);
77 }
78 #define inc_counter_np(counter, t1, t2) \
79 BLOCK_COMMENT("inc_counter " #counter); \
80 inc_counter_np_(counter, t1, t2);
81 #endif
83 //----------------------------------------------------------------------------------------------------
84 // Call stubs are used to call Java from C
86 address generate_call_stub(address& return_pc) {
87 StubCodeMark mark(this, "StubRoutines", "call_stub");
88 address start = __ pc();
90 // Incoming arguments:
91 //
92 // o0 : call wrapper address
93 // o1 : result (address)
94 // o2 : result type
95 // o3 : method
96 // o4 : (interpreter) entry point
97 // o5 : parameters (address)
98 // [sp + 0x5c]: parameter size (in words)
99 // [sp + 0x60]: thread
100 //
101 // +---------------+ <--- sp + 0
102 // | |
103 // . reg save area .
104 // | |
105 // +---------------+ <--- sp + 0x40
106 // | |
107 // . extra 7 slots .
108 // | |
109 // +---------------+ <--- sp + 0x5c
110 // | param. size |
111 // +---------------+ <--- sp + 0x60
112 // | thread |
113 // +---------------+
114 // | |
116 // note: if the link argument position changes, adjust
117 // the code in frame::entry_frame_call_wrapper()
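// For orientation, a sketch of the C++ function-pointer type this stub is
// invoked through (assuming the CallStub typedef in stubRoutines.hpp):
//
//   typedef void (*CallStub)(address   link,
//                            intptr_t* result,
//                            BasicType result_type,
//                            methodOopDesc* method,
//                            address   entry_point,
//                            intptr_t* parameters,
//                            int       size_of_parameters,
//                            TRAPS);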
119 const Argument link = Argument(0, false); // used only for GC
120 const Argument result = Argument(1, false);
121 const Argument result_type = Argument(2, false);
122 const Argument method = Argument(3, false);
123 const Argument entry_point = Argument(4, false);
124 const Argument parameters = Argument(5, false);
125 const Argument parameter_size = Argument(6, false);
126 const Argument thread = Argument(7, false);
128 // setup thread register
129 __ ld_ptr(thread.as_address(), G2_thread);
130 __ reinit_heapbase();
132 #ifdef ASSERT
133 // make sure we have no pending exceptions
134 { const Register t = G3_scratch;
135 Label L;
136 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), t);
137 __ br_null(t, false, Assembler::pt, L);
138 __ delayed()->nop();
139 __ stop("StubRoutines::call_stub: entered with pending exception");
140 __ bind(L);
141 }
142 #endif
144 // create activation frame & allocate space for parameters
145 { const Register t = G3_scratch;
146 __ ld_ptr(parameter_size.as_address(), t); // get parameter size (in words)
147 __ add(t, frame::memory_parameter_word_sp_offset, t); // add space for save area (in words)
148 __ round_to(t, WordsPerLong); // make sure it is multiple of 2 (in words)
149 __ sll(t, Interpreter::logStackElementSize(), t); // compute number of bytes
150 __ neg(t); // negate so it can be used with save
151 __ save(SP, t, SP); // setup new frame
152 }
154 // +---------------+ <--- sp + 0
155 // | |
156 // . reg save area .
157 // | |
158 // +---------------+ <--- sp + 0x40
159 // | |
160 // . extra 7 slots .
161 // | |
162 // +---------------+ <--- sp + 0x5c
163 // | empty slot | (only if parameter size is even)
164 // +---------------+
165 // | |
166 // . parameters .
167 // | |
168 // +---------------+ <--- fp + 0
169 // | |
170 // . reg save area .
171 // | |
172 // +---------------+ <--- fp + 0x40
173 // | |
174 // . extra 7 slots .
175 // | |
176 // +---------------+ <--- fp + 0x5c
177 // | param. size |
178 // +---------------+ <--- fp + 0x60
179 // | thread |
180 // +---------------+
181 // | |
183 // pass parameters if any
184 BLOCK_COMMENT("pass parameters if any");
185 { const Register src = parameters.as_in().as_register();
186 const Register dst = Lentry_args;
187 const Register tmp = G3_scratch;
188 const Register cnt = G4_scratch;
190 // test whether there are any parameters & set up Lentry_args
191 Label exit;
192 __ ld_ptr(parameter_size.as_in().as_address(), cnt); // parameter counter
193 __ add( FP, STACK_BIAS, dst );
194 __ tst(cnt);
195 __ br(Assembler::zero, false, Assembler::pn, exit);
196 __ delayed()->sub(dst, BytesPerWord, dst); // setup Lentry_args
198 // copy parameters if any
199 Label loop;
200 __ BIND(loop);
201 // Store tag first.
202 if (TaggedStackInterpreter) {
203 __ ld_ptr(src, 0, tmp);
204 __ add(src, BytesPerWord, src); // get next
205 __ st_ptr(tmp, dst, Interpreter::tag_offset_in_bytes());
206 }
207 // Store parameter value
208 __ ld_ptr(src, 0, tmp);
209 __ add(src, BytesPerWord, src);
210 __ st_ptr(tmp, dst, Interpreter::value_offset_in_bytes());
211 __ deccc(cnt);
212 __ br(Assembler::greater, false, Assembler::pt, loop);
213 __ delayed()->sub(dst, Interpreter::stackElementSize(), dst);
215 // done
216 __ BIND(exit);
217 }
219 // setup parameters, method & call Java function
220 #ifdef ASSERT
221 // layout_activation_impl checks its notion of the saved SP against
222 // this register, so if this changes, update it as well.
223 const Register saved_SP = Lscratch;
224 __ mov(SP, saved_SP); // keep track of SP before call
225 #endif
227 // setup parameters
228 const Register t = G3_scratch;
229 __ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words)
230 __ sll(t, Interpreter::logStackElementSize(), t); // compute number of bytes
231 __ sub(FP, t, Gargs); // setup parameter pointer
232 #ifdef _LP64
233 __ add( Gargs, STACK_BIAS, Gargs ); // Account for LP64 stack bias
234 #endif
235 __ mov(SP, O5_savedSP);
238 // do the call
239 //
240 // the following registers must be set up:
241 //
242 // G2_thread
243 // G5_method
244 // Gargs
245 BLOCK_COMMENT("call Java function");
246 __ jmpl(entry_point.as_in().as_register(), G0, O7);
247 __ delayed()->mov(method.as_in().as_register(), G5_method); // setup method
249 BLOCK_COMMENT("call_stub_return_address:");
250 return_pc = __ pc();
252 // The callee, if it wasn't interpreted, can return with SP changed, so
253 // we can no longer assert that SP is unchanged here.
255 // store result depending on type
256 // (everything that is not T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE
257 // is treated as T_INT)
258 { const Register addr = result .as_in().as_register();
259 const Register type = result_type.as_in().as_register();
260 Label is_long, is_float, is_double, is_object, exit;
261 __ cmp(type, T_OBJECT); __ br(Assembler::equal, false, Assembler::pn, is_object);
262 __ delayed()->cmp(type, T_FLOAT); __ br(Assembler::equal, false, Assembler::pn, is_float);
263 __ delayed()->cmp(type, T_DOUBLE); __ br(Assembler::equal, false, Assembler::pn, is_double);
264 __ delayed()->cmp(type, T_LONG); __ br(Assembler::equal, false, Assembler::pn, is_long);
265 __ delayed()->nop();
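// Note on the chain above: each cmp sits in the delay slot of the previous
// (non-annulled) branch, so it executes whether or not that branch is taken.
// In C terms the dispatch is roughly (a sketch):
//
//   switch (type) {
//     case T_OBJECT: goto is_object;
//     case T_FLOAT:  goto is_float;
//     case T_DOUBLE: goto is_double;
//     case T_LONG:   goto is_long;
//     default:       break;   // fall through: store an int result
//   }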
267 // store int result
268 __ st(O0, addr, G0);
270 __ BIND(exit);
271 __ ret();
272 __ delayed()->restore();
274 __ BIND(is_object);
275 __ ba(false, exit);
276 __ delayed()->st_ptr(O0, addr, G0);
278 __ BIND(is_float);
279 __ ba(false, exit);
280 __ delayed()->stf(FloatRegisterImpl::S, F0, addr, G0);
282 __ BIND(is_double);
283 __ ba(false, exit);
284 __ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0);
286 __ BIND(is_long);
287 #ifdef _LP64
288 __ ba(false, exit);
289 __ delayed()->st_long(O0, addr, G0); // store entire long
290 #else
291 #if defined(COMPILER2)
292 // All return values are where we want them, except for Longs. C2 returns
293 // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
294 // Since the interpreter will return longs in both G1 and O0/O1 in the 32-bit
295 // build, we simply always use G1.
296 // Note: I tried to make C2 return longs in O0/O1 and G1 so we wouldn't have to
297 // do this here. Unfortunately, if we did a rethrow we'd see a MachEpilog node
298 // first, which would move G1 -> O0/O1 and destroy the exception we were throwing.
300 __ ba(false, exit);
301 __ delayed()->stx(G1, addr, G0); // store entire long
302 #else
303 __ st(O1, addr, BytesPerInt);
304 __ ba(false, exit);
305 __ delayed()->st(O0, addr, G0);
306 #endif /* COMPILER2 */
307 #endif /* _LP64 */
308 }
309 return start;
310 }
313 //----------------------------------------------------------------------------------------------------
314 // Return point for a Java call if there's an exception thrown in Java code.
315 // The exception is caught and transformed into a pending exception stored in
316 // JavaThread that can be tested from within the VM.
317 //
318 // Oexception: exception oop
320 address generate_catch_exception() {
321 StubCodeMark mark(this, "StubRoutines", "catch_exception");
323 address start = __ pc();
324 // verify that thread corresponds
325 __ verify_thread();
327 const Register& temp_reg = Gtemp;
328 Address pending_exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
329 Address exception_file_offset_addr(G2_thread, 0, in_bytes(Thread::exception_file_offset ()));
330 Address exception_line_offset_addr(G2_thread, 0, in_bytes(Thread::exception_line_offset ()));
332 // set pending exception
333 __ verify_oop(Oexception);
334 __ st_ptr(Oexception, pending_exception_addr);
335 __ set((intptr_t)__FILE__, temp_reg);
336 __ st_ptr(temp_reg, exception_file_offset_addr);
337 __ set((intptr_t)__LINE__, temp_reg);
338 __ st(temp_reg, exception_line_offset_addr);
340 // complete return to VM
341 assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
343 Address stub_ret(temp_reg, StubRoutines::_call_stub_return_address);
344 __ jump_to(stub_ret);
345 __ delayed()->nop();
347 return start;
348 }
351 //----------------------------------------------------------------------------------------------------
352 // Continuation point for runtime calls returning with a pending exception
353 // The pending exception check happened in the runtime or native call stub
354 // The pending exception in Thread is converted into a Java-level exception
355 //
356 // Contract with Java-level exception handler: O0 = exception
357 // O1 = throwing pc
359 address generate_forward_exception() {
360 StubCodeMark mark(this, "StubRoutines", "forward_exception");
361 address start = __ pc();
363 // Upon entry, O7 has the return address returning into Java
364 // (interpreted or compiled) code; i.e. the return address
365 // becomes the throwing pc.
367 const Register& handler_reg = Gtemp;
369 Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
371 #ifdef ASSERT
372 // make sure that this code is only executed if there is a pending exception
373 { Label L;
374 __ ld_ptr(exception_addr, Gtemp);
375 __ br_notnull(Gtemp, false, Assembler::pt, L);
376 __ delayed()->nop();
377 __ stop("StubRoutines::forward exception: no pending exception (1)");
378 __ bind(L);
379 }
380 #endif
382 // compute exception handler into handler_reg
383 __ get_thread();
384 __ ld_ptr(exception_addr, Oexception);
385 __ verify_oop(Oexception);
386 __ save_frame(0); // compensates for compiler weakness
387 __ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC
388 BLOCK_COMMENT("call exception_handler_for_return_address");
389 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), Lscratch);
390 __ mov(O0, handler_reg);
391 __ restore(); // compensates for compiler weakness
393 __ ld_ptr(exception_addr, Oexception);
394 __ add(O7, frame::pc_return_offset, Oissuing_pc); // save the issuing PC
396 #ifdef ASSERT
397 // make sure exception is set
398 { Label L;
399 __ br_notnull(Oexception, false, Assembler::pt, L);
400 __ delayed()->nop();
401 __ stop("StubRoutines::forward exception: no pending exception (2)");
402 __ bind(L);
403 }
404 #endif
405 // jump to exception handler
406 __ jmp(handler_reg, 0);
407 // clear pending exception
408 __ delayed()->st_ptr(G0, exception_addr);
410 return start;
411 }
414 //------------------------------------------------------------------------------------------------------------------------
415 // Continuation point for throwing of implicit exceptions that are not handled in
416 // the current activation. Fabricates an exception oop and initiates normal
417 // exception dispatching in this frame. Only callee-saved registers are preserved
418 // (through the normal register window / RegisterMap handling).
419 // If the compiler needs all registers to be preserved between the fault
420 // point and the exception handler then it must assume responsibility for that in
421 // AbstractCompiler::continuation_for_implicit_null_exception or
422 // continuation_for_implicit_division_by_zero_exception. All other implicit
423 // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
424 // either at call sites or otherwise assume that stack unwinding will be initiated,
425 // so caller saved registers were assumed volatile in the compiler.
427 // Note that we generate only this stub into a RuntimeStub, because it needs to be
428 // properly traversed and ignored during GC, so we change the meaning of the "__"
429 // macro within this method.
430 #undef __
431 #define __ masm->
433 address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc) {
434 #ifdef ASSERT
435 int insts_size = VerifyThread ? 1 * K : 600;
436 #else
437 int insts_size = VerifyThread ? 1 * K : 256;
438 #endif /* ASSERT */
439 int locs_size = 32;
441 CodeBuffer code(name, insts_size, locs_size);
442 MacroAssembler* masm = new MacroAssembler(&code);
444 __ verify_thread();
446 // This is an inlined and slightly modified version of call_VM
447 // which has the ability to fetch the return PC out of thread-local storage
448 __ assert_not_delayed();
450 // Note that we always push a frame because on the SPARC
451 // architecture, for all of our implicit exception kinds at call
452 // sites, the implicit exception is taken before the callee frame
453 // is pushed.
454 __ save_frame(0);
456 int frame_complete = __ offset();
458 if (restore_saved_exception_pc) {
459 Address saved_exception_pc(G2_thread, 0, in_bytes(JavaThread::saved_exception_pc_offset()));
460 __ ld_ptr(saved_exception_pc, I7);
461 __ sub(I7, frame::pc_return_offset, I7);
462 }
464 // Note that we always have a runtime stub frame on the top of stack by this point
465 Register last_java_sp = SP;
466 // 64-bit last_java_sp is biased!
467 __ set_last_Java_frame(last_java_sp, G0);
468 if (VerifyThread) __ mov(G2_thread, O0); // about to be smashed; pass early
469 __ save_thread(noreg);
470 // do the call
471 BLOCK_COMMENT("call runtime_entry");
472 __ call(runtime_entry, relocInfo::runtime_call_type);
473 if (!VerifyThread)
474 __ delayed()->mov(G2_thread, O0); // pass thread as first argument
475 else
476 __ delayed()->nop(); // (thread already passed)
477 __ restore_thread(noreg);
478 __ reset_last_Java_frame();
480 // check for pending exceptions. use Gtemp as scratch register.
481 #ifdef ASSERT
482 Label L;
484 Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
485 Register scratch_reg = Gtemp;
486 __ ld_ptr(exception_addr, scratch_reg);
487 __ br_notnull(scratch_reg, false, Assembler::pt, L);
488 __ delayed()->nop();
489 __ should_not_reach_here();
490 __ bind(L);
491 #endif // ASSERT
492 BLOCK_COMMENT("call forward_exception_entry");
493 __ call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
494 // we use O7 linkage so that forward_exception_entry has the issuing PC
495 __ delayed()->restore();
497 RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, masm->total_frame_size_in_bytes(0), NULL, false);
498 return stub->entry_point();
499 }
501 #undef __
502 #define __ _masm->
505 // Generate a routine that sets all the registers so we
506 // can tell if the stop routine prints them correctly.
507 address generate_test_stop() {
508 StubCodeMark mark(this, "StubRoutines", "test_stop");
509 address start = __ pc();
511 int i;
513 __ save_frame(0);
515 static jfloat zero = 0.0, one = 1.0;
517 // put addr in L0, then load through L0 to F0
518 __ set((intptr_t)&zero, L0); __ ldf( FloatRegisterImpl::S, L0, 0, F0);
519 __ set((intptr_t)&one, L0); __ ldf( FloatRegisterImpl::S, L0, 0, F1); // 1.0 to F1
521 // use add to put 2..18 in F2..F18
522 for ( i = 2; i <= 18; ++i ) {
523 __ fadd( FloatRegisterImpl::S, F1, as_FloatRegister(i-1), as_FloatRegister(i));
524 }
526 // Now put double 2 in F16, double 18 in F18
527 __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F2, F16 );
528 __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F18, F18 );
530 // use add to put 20..32 in F20..F32
531 for (i = 20; i < 32; i += 2) {
532 __ fadd( FloatRegisterImpl::D, F16, as_FloatRegister(i-2), as_FloatRegister(i));
533 }
535 // put 0..7 in i's, 8..15 in l's, 16..23 in o's, 24..31 in g's
536 for ( i = 0; i < 8; ++i ) {
537 if (i < 6) {
538 __ set( i, as_iRegister(i));
539 __ set(16 + i, as_oRegister(i));
540 __ set(24 + i, as_gRegister(i));
541 }
542 __ set( 8 + i, as_lRegister(i));
543 }
545 __ stop("testing stop");
548 __ ret();
549 __ delayed()->restore();
551 return start;
552 }
555 address generate_stop_subroutine() {
556 StubCodeMark mark(this, "StubRoutines", "stop_subroutine");
557 address start = __ pc();
559 __ stop_subroutine();
561 return start;
562 }
564 address generate_flush_callers_register_windows() {
565 StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
566 address start = __ pc();
568 __ flush_windows();
569 __ retl(false);
570 __ delayed()->add( FP, STACK_BIAS, O0 );
571 // The returned value must be a stack pointer whose register save area
572 // is flushed, and will stay flushed while the caller executes.
574 return start;
575 }
577 // Helper functions for v8 atomic operations.
578 //
579 void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) {
580 if (mark_oop_reg == noreg) {
581 address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr();
582 __ set((intptr_t)lock_ptr, lock_ptr_reg);
583 } else {
584 assert(scratch_reg != noreg, "just checking");
585 address lock_ptr = (address)StubRoutines::Sparc::_v8_oop_lock_cache;
586 __ set((intptr_t)lock_ptr, lock_ptr_reg);
587 __ and3(mark_oop_reg, StubRoutines::Sparc::v8_oop_lock_mask_in_place, scratch_reg);
588 __ add(lock_ptr_reg, scratch_reg, lock_ptr_reg);
589 }
590 }
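// In C terms the else-branch above computes (a sketch):
//
//   lock_ptr = (address)_v8_oop_lock_cache + (mark_oop & v8_oop_lock_mask_in_place);
//
// i.e. the mark word hashes into a small cache of lock words so that
// unrelated oops rarely contend on the same lock.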
592 void generate_v8_lock_prologue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
594 get_v8_oop_lock_ptr(lock_ptr_reg, mark_oop_reg, scratch_reg);
595 __ set(StubRoutines::Sparc::locked, lock_reg);
596 // Initialize yield counter
597 __ mov(G0,yield_reg);
599 __ BIND(retry);
600 __ cmp(yield_reg, V8AtomicOperationUnderLockSpinCount);
601 __ br(Assembler::less, false, Assembler::pt, dontyield);
602 __ delayed()->nop();
604 // This code can only be called from inside the VM; this
605 // stub is only invoked from Atomic::add(). We do not
606 // want to use call_VM, because _last_java_sp and such
607 // must already be set.
608 //
609 // Save the regs and make space for a C call
610 __ save(SP, -96, SP);
611 __ save_all_globals_into_locals();
612 BLOCK_COMMENT("call os::naked_sleep");
613 __ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
614 __ delayed()->nop();
615 __ restore_globals_from_locals();
616 __ restore();
617 // reset the counter
618 __ mov(G0,yield_reg);
620 __ BIND(dontyield);
622 // try to get lock
623 __ swap(lock_ptr_reg, 0, lock_reg);
625 // did we get the lock?
626 __ cmp(lock_reg, StubRoutines::Sparc::unlocked);
627 __ br(Assembler::notEqual, true, Assembler::pn, retry);
628 __ delayed()->add(yield_reg,1,yield_reg);
630 // yes, got lock. do the operation here.
631 }
633 void generate_v8_lock_epilogue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
634 __ st(lock_reg, lock_ptr_reg, 0); // unlock
635 }
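// Taken together, the prologue/epilogue pair implements roughly this
// spin-then-yield protocol (a sketch):
//
//   int yields = 0;
//   while (swap(lock_ptr, locked) != unlocked) {   // try to grab the lock
//     if (++yields >= V8AtomicOperationUnderLockSpinCount) {
//       os::naked_sleep();                         // back off before retrying
//       yields = 0;
//     }
//   }
//   /* ... the guarded operation, emitted by the caller ... */
//   *lock_ptr = unlocked;                          // epilogue: release the lock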
637 // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
638 //
639 // Arguments :
640 //
641 // exchange_value: O0
642 // dest: O1
643 //
644 // Results:
645 //
646 // O0: the value previously stored in dest
647 //
648 address generate_atomic_xchg() {
649 StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
650 address start = __ pc();
652 if (UseCASForSwap) {
653 // Use CAS instead of swap, just in case the MP hardware
654 // prefers to work with just one kind of synch. instruction.
655 Label retry;
656 __ BIND(retry);
657 __ mov(O0, O3); // scratch copy of exchange value
658 __ ld(O1, 0, O2); // observe the previous value
659 // try to replace O2 with O3
660 __ cas_under_lock(O1, O2, O3,
661 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
662 __ cmp(O2, O3);
663 __ br(Assembler::notEqual, false, Assembler::pn, retry);
664 __ delayed()->nop();
666 __ retl(false);
667 __ delayed()->mov(O2, O0); // report previous value to caller
669 } else {
670 if (VM_Version::v9_instructions_work()) {
671 __ retl(false);
672 __ delayed()->swap(O1, 0, O0);
673 } else {
674 const Register& lock_reg = O2;
675 const Register& lock_ptr_reg = O3;
676 const Register& yield_reg = O4;
678 Label retry;
679 Label dontyield;
681 generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
682 // got the lock, do the swap
683 __ swap(O1, 0, O0);
685 generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
686 __ retl(false);
687 __ delayed()->nop();
688 }
689 }
691 return start;
692 }
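// Semantics of the CAS path above, in C terms (a sketch; cas() stands for
// the hardware compare-and-swap, returning the prior memory value):
//
//   jint old;
//   do {
//     old = *dest;                                 // observe the previous value
//   } while (cas(dest, old, exchange_value) != old);
//   return old;                                    // reported in O0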
695 // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
696 //
697 // Arguments :
698 //
699 // exchange_value: O0
700 // dest: O1
701 // compare_value: O2
702 //
703 // Results:
704 //
705 // O0: the value previously stored in dest
706 //
707 // Overwrites (v8): O3,O4,O5
708 //
709 address generate_atomic_cmpxchg() {
710 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
711 address start = __ pc();
713 // cmpxchg(dest, compare_value, exchange_value)
714 __ cas_under_lock(O1, O2, O0,
715 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
716 __ retl(false);
717 __ delayed()->nop();
719 return start;
720 }
722 // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
723 //
724 // Arguments :
725 //
726 // exchange_value: O1:O0
727 // dest: O2
728 // compare_value: O4:O3
729 //
730 // Results:
731 //
732 // O1:O0: the value previously stored in dest
733 //
734 // This only works on V9; on V8 we don't generate any
735 // code and just return NULL.
736 //
737 // Overwrites: G1,G2,G3
738 //
739 address generate_atomic_cmpxchg_long() {
740 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
741 address start = __ pc();
743 if (!VM_Version::supports_cx8())
744 return NULL;
745 __ sllx(O0, 32, O0);
746 __ srl(O1, 0, O1);
747 __ or3(O0,O1,O0); // O0 holds 64-bit value of exchange_value
748 __ sllx(O3, 32, O3);
749 __ srl(O4, 0, O4);
750 __ or3(O3,O4,O3); // O3 holds 64-bit value of compare_value
751 __ casx(O2, O3, O0);
752 __ srl(O0, 0, O1); // unpacked return value in O1:O0
753 __ retl(false);
754 __ delayed()->srlx(O0, 32, O0);
756 return start;
757 }
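// Packing sketch for the code above: each jlong argument arrives as a pair
// of 32-bit registers (high word in the lower-numbered register) and is
// merged with
//
//   jlong v = ((jlong)high << 32) | (julong)(juint)low;
//
// casx then performs the 64-bit compare-and-swap in a single instruction,
// and the old value is split back into O1:O0 the same way in reverse.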
760 // Support for jint Atomic::add(jint add_value, volatile jint* dest).
761 //
762 // Arguments :
763 //
764 // add_value: O0 (e.g., +1 or -1)
765 // dest: O1
766 //
767 // Results:
768 //
769 // O0: the new value stored in dest
770 //
771 // Overwrites (v9): O3
772 // Overwrites (v8): O3,O4,O5
773 //
774 address generate_atomic_add() {
775 StubCodeMark mark(this, "StubRoutines", "atomic_add");
776 address start = __ pc();
777 __ BIND(_atomic_add_stub);
779 if (VM_Version::v9_instructions_work()) {
780 Label retry;
781 __ BIND(retry);
783 __ lduw(O1, 0, O2);
784 __ add(O0, O2, O3);
785 __ cas(O1, O2, O3);
786 __ cmp( O2, O3);
787 __ br(Assembler::notEqual, false, Assembler::pn, retry);
788 __ delayed()->nop();
789 __ retl(false);
790 __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
791 } else {
792 const Register& lock_reg = O2;
793 const Register& lock_ptr_reg = O3;
794 const Register& value_reg = O4;
795 const Register& yield_reg = O5;
797 Label retry;
798 Label dontyield;
800 generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
801 // got lock, do the increment
802 __ ld(O1, 0, value_reg);
803 __ add(O0, value_reg, value_reg);
804 __ st(value_reg, O1, 0);
806 // %%% only for RMO and PSO
807 __ membar(Assembler::StoreStore);
809 generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
811 __ retl(false);
812 __ delayed()->mov(value_reg, O0);
813 }
815 return start;
816 }
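// The v9 path above is, in C terms (a sketch; cas() as in the xchg stub):
//
//   jint old, sum;
//   do {
//     old = *dest;
//     sum = old + add_value;
//   } while (cas(dest, old, sum) != old);
//   return sum;                                    // the new value, in O0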
817 Label _atomic_add_stub; // called from other stubs
820 //------------------------------------------------------------------------------------------------------------------------
821 // The following routine generates a subroutine to throw an asynchronous
822 // UnknownError when an unsafe access gets a fault that could not be
823 // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
824 //
825 // Arguments :
826 //
827 // trapping PC: O7
828 //
829 // Results:
830 // posts an asynchronous exception, skips the trapping instruction
831 //
833 address generate_handler_for_unsafe_access() {
834 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
835 address start = __ pc();
837 const int preserve_register_words = (64 * 2);
838 Address preserve_addr(FP, 0, (-preserve_register_words * wordSize) + STACK_BIAS);
840 Register Lthread = L7_thread_cache;
841 int i;
843 __ save_frame(0);
844 __ mov(G1, L1);
845 __ mov(G2, L2);
846 __ mov(G3, L3);
847 __ mov(G4, L4);
848 __ mov(G5, L5);
849 for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
850 __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
851 }
853 address entry_point = CAST_FROM_FN_PTR(address, handle_unsafe_access);
854 BLOCK_COMMENT("call handle_unsafe_access");
855 __ call(entry_point, relocInfo::runtime_call_type);
856 __ delayed()->nop();
858 __ mov(L1, G1);
859 __ mov(L2, G2);
860 __ mov(L3, G3);
861 __ mov(L4, G4);
862 __ mov(L5, G5);
863 for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
864 __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
865 }
867 __ verify_thread();
869 __ jmp(O0, 0);
870 __ delayed()->restore();
872 return start;
873 }
876 // Support for uint StubRoutines::Sparc::partial_subtype_check( Klass sub, Klass super );
877 // Arguments :
878 //
879 // ret : O0, returned
880 // icc/xcc: set as O0 (depending on wordSize)
881 // sub : O1, argument, not changed
882 // super: O2, argument, not changed
883 // raddr: O7, blown by call
884 address generate_partial_subtype_check() {
885 __ align(CodeEntryAlignment);
886 StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
887 address start = __ pc();
888 Label miss;
890 #if defined(COMPILER2) && !defined(_LP64)
891 // Do not use a 'save' because it blows the 64-bit O registers.
892 __ add(SP,-4*wordSize,SP); // Make space for 4 temps (stack must be 2 words aligned)
893 __ st_ptr(L0,SP,(frame::register_save_words+0)*wordSize);
894 __ st_ptr(L1,SP,(frame::register_save_words+1)*wordSize);
895 __ st_ptr(L2,SP,(frame::register_save_words+2)*wordSize);
896 __ st_ptr(L3,SP,(frame::register_save_words+3)*wordSize);
897 Register Rret = O0;
898 Register Rsub = O1;
899 Register Rsuper = O2;
900 #else
901 __ save_frame(0);
902 Register Rret = I0;
903 Register Rsub = I1;
904 Register Rsuper = I2;
905 #endif
907 Register L0_ary_len = L0;
908 Register L1_ary_ptr = L1;
909 Register L2_super = L2;
910 Register L3_index = L3;
912 __ check_klass_subtype_slow_path(Rsub, Rsuper,
913 L0, L1, L2, L3,
914 NULL, &miss);
916 // Match falls through here.
917 __ addcc(G0,0,Rret); // set Z flags, Z result
919 #if defined(COMPILER2) && !defined(_LP64)
920 __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
921 __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
922 __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
923 __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
924 __ retl(); // Result in Rret is zero; flags set to Z
925 __ delayed()->add(SP,4*wordSize,SP);
926 #else
927 __ ret(); // Result in Rret is zero; flags set to Z
928 __ delayed()->restore();
929 #endif
931 __ BIND(miss);
932 __ addcc(G0,1,Rret); // set NZ flags, NZ result
934 #if defined(COMPILER2) && !defined(_LP64)
935 __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
936 __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
937 __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
938 __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
939 __ retl(); // Result in Rret is != 0; flags set to NZ
940 __ delayed()->add(SP,4*wordSize,SP);
941 #else
942 __ ret(); // Result in Rret is != 0; flags set to NZ
943 __ delayed()->restore();
944 #endif
946 return start;
947 }
950 // Called from MacroAssembler::verify_oop
951 //
952 address generate_verify_oop_subroutine() {
953 StubCodeMark mark(this, "StubRoutines", "verify_oop_stub");
955 address start = __ pc();
957 __ verify_oop_subroutine();
959 return start;
960 }
962 static address disjoint_byte_copy_entry;
963 static address disjoint_short_copy_entry;
964 static address disjoint_int_copy_entry;
965 static address disjoint_long_copy_entry;
966 static address disjoint_oop_copy_entry;
968 static address byte_copy_entry;
969 static address short_copy_entry;
970 static address int_copy_entry;
971 static address long_copy_entry;
972 static address oop_copy_entry;
974 static address checkcast_copy_entry;
976 //
977 // Verify that a register contains a clean 32-bit positive value
978 // (high 32 bits are 0) so it can be used in 64-bit shifts (sllx, srax).
979 //
980 // Input:
981 // Rint - 32-bit value
982 // Rtmp - scratch
983 //
984 void assert_clean_int(Register Rint, Register Rtmp) {
985 #if defined(ASSERT) && defined(_LP64)
986 __ signx(Rint, Rtmp);
987 __ cmp(Rint, Rtmp);
988 __ breakpoint_trap(Assembler::notEqual, Assembler::xcc);
989 #endif
990 }
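// In C terms the assertion above is (a sketch):
//
//   assert((jlong)Rint == (jlong)(jint)Rint, "high 32 bits must be clean");
//
// i.e. the upper half of the 64-bit register already holds the sign
// extension of the lower half, so the value can feed 64-bit shifts directly.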
992 //
993 // Generate overlap test for array copy stubs
994 //
995 // Input:
996 // O0 - array1
997 // O1 - array2
998 // O2 - element count
999 //
1000 // Kills temps: O3, O4
1001 //
1002 void array_overlap_test(address no_overlap_target, int log2_elem_size) {
1003 assert(no_overlap_target != NULL, "must be generated");
1004 array_overlap_test(no_overlap_target, NULL, log2_elem_size);
1005 }
1006 void array_overlap_test(Label& L_no_overlap, int log2_elem_size) {
1007 array_overlap_test(NULL, &L_no_overlap, log2_elem_size);
1008 }
1009 void array_overlap_test(address no_overlap_target, Label* NOLp, int log2_elem_size) {
1010 const Register from = O0;
1011 const Register to = O1;
1012 const Register count = O2;
1013 const Register to_from = O3; // to - from
1014 const Register byte_count = O4; // count << log2_elem_size
1016 __ subcc(to, from, to_from);
1017 __ sll_ptr(count, log2_elem_size, byte_count);
1018 if (NOLp == NULL)
1019 __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, no_overlap_target);
1020 else
1021 __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp));
1022 __ delayed()->cmp(to_from, byte_count);
1023 if (NOLp == NULL)
1024 __ brx(Assembler::greaterEqual, false, Assembler::pt, no_overlap_target);
1025 else
1026 __ brx(Assembler::greaterEqual, false, Assembler::pt, (*NOLp));
1027 __ delayed()->nop();
1028 }
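// The two branches above implement, in effect (a sketch; comparisons as in
// the generated code, the first one unsigned):
//
//   if (to <= from)                              goto no_overlap; // copying upward is safe
//   if (to - from >= (count << log2_elem_size))  goto no_overlap; // ranges are disjoint
//   /* otherwise fall through to the conjoint (backward) copy code */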
1030 //
1031 // Generate pre-write barrier for array.
1032 //
1033 // Input:
1034 // addr - register containing starting address
1035 // count - register containing element count
1036 // tmp - scratch register
1037 //
1038 // The input registers are overwritten.
1039 //
1040 void gen_write_ref_array_pre_barrier(Register addr, Register count) {
1041 BarrierSet* bs = Universe::heap()->barrier_set();
1042 if (bs->has_write_ref_pre_barrier()) {
1043 assert(bs->has_write_ref_array_pre_opt(),
1044 "Else unsupported barrier set.");
1046 __ save_frame(0);
1047 // Save the necessary global regs... will be used after.
1048 if (addr->is_global()) {
1049 __ mov(addr, L0);
1050 }
1051 if (count->is_global()) {
1052 __ mov(count, L1);
1053 }
1054 __ mov(addr->after_save(), O0);
1055 // Get the count into O1
1056 __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
1057 __ delayed()->mov(count->after_save(), O1);
1058 if (addr->is_global()) {
1059 __ mov(L0, addr);
1060 }
1061 if (count->is_global()) {
1062 __ mov(L1, count);
1063 }
1064 __ restore();
1065 }
1066 }
1067 //
1068 // Generate post-write barrier for array.
1069 //
1070 // Input:
1071 // addr - register containing starting address
1072 // count - register containing element count
1073 // tmp - scratch register
1074 //
1075 // The input registers are overwritten.
1076 //
1077 void gen_write_ref_array_post_barrier(Register addr, Register count,
1078 Register tmp) {
1079 BarrierSet* bs = Universe::heap()->barrier_set();
1081 switch (bs->kind()) {
1082 case BarrierSet::G1SATBCT:
1083 case BarrierSet::G1SATBCTLogging:
1084 {
1085 // Get some new fresh output registers.
1086 __ save_frame(0);
1087 __ mov(addr->after_save(), O0);
1088 __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post));
1089 __ delayed()->mov(count->after_save(), O1);
1090 __ restore();
1091 }
1092 break;
1093 case BarrierSet::CardTableModRef:
1094 case BarrierSet::CardTableExtension:
1095 {
1096 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1097 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1098 assert_different_registers(addr, count, tmp);
1100 Label L_loop;
1102 __ sll_ptr(count, LogBytesPerHeapOop, count);
1103 __ sub(count, BytesPerHeapOop, count);
1104 __ add(count, addr, count);
1105 // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
1106 __ srl_ptr(addr, CardTableModRefBS::card_shift, addr);
1107 __ srl_ptr(count, CardTableModRefBS::card_shift, count);
1108 __ sub(count, addr, count);
1109 Address rs(tmp, (address)ct->byte_map_base);
1110 __ load_address(rs);
1111 __ BIND(L_loop);
1112 __ stb(G0, rs.base(), addr);
1113 __ subcc(count, 1, count);
1114 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
1115 __ delayed()->add(addr, 1, addr);
1117 }
1118 break;
1119 case BarrierSet::ModRef:
1120 break;
1121 default :
1122 ShouldNotReachHere();
1124 }
1125 }
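// The card-table case above dirties every card spanned by the oop range,
// roughly (a sketch; the dirty value is 0):
//
//   jbyte* first = byte_map_base + ((uintptr_t)addr >> card_shift);
//   jbyte* last  = byte_map_base
//                + (((uintptr_t)addr + (count - 1) * BytesPerHeapOop) >> card_shift);
//   for (jbyte* p = first; p <= last; p++)  *p = 0;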
1128 // Copy big chunks forward with shift
1129 //
1130 // Inputs:
1131 // from - source array address
1132 // to - destination array address, aligned to 8 bytes
1133 // count - element count to copy, at least the equivalent of 16 bytes
1134 // count_dec - decrement of 'count' equivalent to 16 bytes
1135 // L_copy_bytes - copy exit label
1136 //
1137 void copy_16_bytes_forward_with_shift(Register from, Register to,
1138 Register count, int count_dec, Label& L_copy_bytes) {
1139 Label L_loop, L_aligned_copy, L_copy_last_bytes;
1141 // if both arrays have the same alignment mod 8, do an 8-byte aligned copy
1142 __ andcc(from, 7, G1); // misaligned bytes
1143 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
1144 __ delayed()->nop();
1146 const Register left_shift = G1; // left shift bit counter
1147 const Register right_shift = G5; // right shift bit counter
1149 __ sll(G1, LogBitsPerByte, left_shift);
1150 __ mov(64, right_shift);
1151 __ sub(right_shift, left_shift, right_shift);
1153 //
1154 // Load 2 aligned 8-bytes chunks and use one from previous iteration
1155 // to form 2 aligned 8-bytes chunks to store.
1156 //
1157 __ deccc(count, count_dec); // Pre-decrement 'count'
1158 __ andn(from, 7, from); // Align address
1159 __ ldx(from, 0, O3);
1160 __ inc(from, 8);
1161 __ align(16);
1162 __ BIND(L_loop);
1163 __ ldx(from, 0, O4);
1164 __ deccc(count, count_dec); // Can we do next iteration after this one?
1165 __ ldx(from, 8, G4);
1166 __ inc(to, 16);
1167 __ inc(from, 16);
1168 __ sllx(O3, left_shift, O3);
1169 __ srlx(O4, right_shift, G3);
1170 __ bset(G3, O3);
1171 __ stx(O3, to, -16);
1172 __ sllx(O4, left_shift, O4);
1173 __ srlx(G4, right_shift, G3);
1174 __ bset(G3, O4);
1175 __ stx(O4, to, -8);
1176 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
1177 __ delayed()->mov(G4, O3);
1179 __ inccc(count, count_dec>>1 ); // + 8 bytes
1180 __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
1181 __ delayed()->inc(count, count_dec>>1); // restore 'count'
1183 // copy 8 bytes, part of them already loaded in O3
1184 __ ldx(from, 0, O4);
1185 __ inc(to, 8);
1186 __ inc(from, 8);
1187 __ sllx(O3, left_shift, O3);
1188 __ srlx(O4, right_shift, G3);
1189 __ bset(O3, G3);
1190 __ stx(G3, to, -8);
1192 __ BIND(L_copy_last_bytes);
1193 __ srl(right_shift, LogBitsPerByte, right_shift); // misaligned bytes
1194 __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
1195 __ delayed()->sub(from, right_shift, from); // restore address
1197 __ BIND(L_aligned_copy);
1198 }
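// Merging sketch for the shifted copy above: with 'from' misaligned by m
// bytes (0 < m < 8), every aligned 8-byte store combines two adjacent
// aligned loads (SPARC is big-endian, so earlier bytes are more significant):
//
//   store_word = (prev << m*8) | (next >> (64 - m*8));
//
// 'prev' is carried between iterations in O3, so each 16-byte iteration
// issues only two new loads.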
1200 // Copy big chunks backward with shift
1201 //
1202 // Inputs:
1203 // end_from - source array end address
1204 // end_to - destination array end address, aligned to 8 bytes
1205 // count - element count to copy, at least the equivalent of 16 bytes
1206 // count_dec - decrement of 'count' equivalent to 16 bytes
1207 // L_aligned_copy - aligned copy exit label
1208 // L_copy_bytes - copy exit label
1209 //
1210 void copy_16_bytes_backward_with_shift(Register end_from, Register end_to,
1211 Register count, int count_dec,
1212 Label& L_aligned_copy, Label& L_copy_bytes) {
1213 Label L_loop, L_copy_last_bytes;
1215 // if both arrays have the same alignment mod 8, do an 8-byte aligned copy
1216 __ andcc(end_from, 7, G1); // misaligned bytes
1217 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
1218 __ delayed()->deccc(count, count_dec); // Pre-decrement 'count'
1220 const Register left_shift = G1; // left shift bit counter
1221 const Register right_shift = G5; // right shift bit counter
1223 __ sll(G1, LogBitsPerByte, left_shift);
1224 __ mov(64, right_shift);
1225 __ sub(right_shift, left_shift, right_shift);
1227 //
1228 // Load 2 aligned 8-bytes chunks and use one from previous iteration
1229 // to form 2 aligned 8-bytes chunks to store.
1230 //
1231 __ andn(end_from, 7, end_from); // Align address
1232 __ ldx(end_from, 0, O3);
1233 __ align(16);
1234 __ BIND(L_loop);
1235 __ ldx(end_from, -8, O4);
1236 __ deccc(count, count_dec); // Can we do next iteration after this one?
1237 __ ldx(end_from, -16, G4);
1238 __ dec(end_to, 16);
1239 __ dec(end_from, 16);
1240 __ srlx(O3, right_shift, O3);
1241 __ sllx(O4, left_shift, G3);
1242 __ bset(G3, O3);
1243 __ stx(O3, end_to, 8);
1244 __ srlx(O4, right_shift, O4);
1245 __ sllx(G4, left_shift, G3);
1246 __ bset(G3, O4);
1247 __ stx(O4, end_to, 0);
1248 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
1249 __ delayed()->mov(G4, O3);
1251 __ inccc(count, count_dec>>1 ); // + 8 bytes
1252 __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
1253 __ delayed()->inc(count, count_dec>>1); // restore 'count'
1255 // copy 8 bytes, part of them already loaded in O3
1256 __ ldx(end_from, -8, O4);
1257 __ dec(end_to, 8);
1258 __ dec(end_from, 8);
1259 __ srlx(O3, right_shift, O3);
1260 __ sllx(O4, left_shift, G3);
1261 __ bset(O3, G3);
1262 __ stx(G3, end_to, 0);
1264 __ BIND(L_copy_last_bytes);
1265 __ srl(left_shift, LogBitsPerByte, left_shift); // misaligned bytes
1266 __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
1267 __ delayed()->add(end_from, left_shift, end_from); // restore address
1268 }
1270 //
1271 // Generate stub for disjoint byte copy. If "aligned" is true, the
1272 // "from" and "to" addresses are assumed to be heapword aligned.
1273 //
1274 // Arguments for generated stub:
1275 // from: O0
1276 // to: O1
1277 // count: O2 treated as signed
1278 //
1279 address generate_disjoint_byte_copy(bool aligned, const char * name) {
1280 __ align(CodeEntryAlignment);
1281 StubCodeMark mark(this, "StubRoutines", name);
1282 address start = __ pc();
1284 Label L_skip_alignment, L_align;
1285 Label L_copy_byte, L_copy_byte_loop, L_exit;
1287 const Register from = O0; // source array address
1288 const Register to = O1; // destination array address
1289 const Register count = O2; // elements count
1290 const Register offset = O5; // offset from start of arrays
1291 // O3, O4, G3, G4 are used as temp registers
1293 assert_clean_int(count, O3); // Make sure 'count' is clean int.
1295 if (!aligned) disjoint_byte_copy_entry = __ pc();
1296 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1297 if (!aligned) BLOCK_COMMENT("Entry:");
1299 // for short arrays, just do single element copy
1300 __ cmp(count, 23); // 16 + 7
1301 __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
1302 __ delayed()->mov(G0, offset);
1304 if (aligned) {
1305 // 'aligned' == true when it is known statically during compilation
1306 // of this arraycopy call site that both 'from' and 'to' addresses
1307 // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1308 //
1309 // Aligned arrays have 4-byte alignment in the 32-bit VM
1310 // and 8-byte alignment in the 64-bit VM, so we do it only in the 32-bit VM.
1311 //
1312 #ifndef _LP64
1313 // copy a 4-bytes word if necessary to align 'to' to 8 bytes
1314 __ andcc(to, 7, G0);
1315 __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment);
1316 __ delayed()->ld(from, 0, O3);
1317 __ inc(from, 4);
1318 __ inc(to, 4);
1319 __ dec(count, 4);
1320 __ st(O3, to, -4);
1321 __ BIND(L_skip_alignment);
1322 #endif
1323 } else {
1324 // copy bytes to align 'to' on 8 byte boundary
1325 __ andcc(to, 7, G1); // misaligned bytes
1326 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1327 __ delayed()->neg(G1);
1328 __ inc(G1, 8); // number of bytes to copy to reach the next 8-byte alignment
1329 __ sub(count, G1, count);
1330 __ BIND(L_align);
1331 __ ldub(from, 0, O3);
1332 __ deccc(G1);
1333 __ inc(from);
1334 __ stb(O3, to, 0);
1335 __ br(Assembler::notZero, false, Assembler::pt, L_align);
1336 __ delayed()->inc(to);
1337 __ BIND(L_skip_alignment);
1338 }
1339 #ifdef _LP64
1340 if (!aligned)
1341 #endif
1342 {
1343 // Copy with shift 16 bytes per iteration if arrays do not have
1344 // the same alignment mod 8, otherwise fall through to the next
1345 // code for aligned copy.
1346 // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
1347 // Also jump over aligned copy after the copy with shift completed.
1349 copy_16_bytes_forward_with_shift(from, to, count, 16, L_copy_byte);
1350 }
1352 // Both arrays are 8-byte aligned; copy 16 bytes at a time
1353 __ and3(count, 7, G4); // Save count
1354 __ srl(count, 3, count);
1355 generate_disjoint_long_copy_core(aligned);
1356 __ mov(G4, count); // Restore count
1358 // copy trailing bytes
1359 __ BIND(L_copy_byte);
1360 __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
1361 __ delayed()->nop();
1362 __ align(16);
1363 __ BIND(L_copy_byte_loop);
1364 __ ldub(from, offset, O3);
1365 __ deccc(count);
1366 __ stb(O3, to, offset);
1367 __ brx(Assembler::notZero, false, Assembler::pt, L_copy_byte_loop);
1368 __ delayed()->inc(offset);
1370 __ BIND(L_exit);
1371 // O3, O4 are used as temp registers
1372 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
1373 __ retl();
1374 __ delayed()->mov(G0, O0); // return 0
1375 return start;
1376 }
1378 //
1379 // Generate stub for conjoint byte copy. If "aligned" is true, the
1380 // "from" and "to" addresses are assumed to be heapword aligned.
1381 //
1382 // Arguments for generated stub:
1383 // from: O0
1384 // to: O1
1385 // count: O2 treated as signed
1386 //
1387 address generate_conjoint_byte_copy(bool aligned, const char * name) {
1388 // Do reverse copy.
1390 __ align(CodeEntryAlignment);
1391 StubCodeMark mark(this, "StubRoutines", name);
1392 address start = __ pc();
1393 address nooverlap_target = aligned ?
1394 StubRoutines::arrayof_jbyte_disjoint_arraycopy() :
1395 disjoint_byte_copy_entry;
1397 Label L_skip_alignment, L_align, L_aligned_copy;
1398 Label L_copy_byte, L_copy_byte_loop, L_exit;
1400 const Register from = O0; // source array address
1401 const Register to = O1; // destination array address
1402 const Register count = O2; // elements count
1403 const Register end_from = from; // source array end address
1404 const Register end_to = to; // destination array end address
1406 assert_clean_int(count, O3); // Make sure 'count' is clean int.
1408 if (!aligned) byte_copy_entry = __ pc();
1409 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1410 if (!aligned) BLOCK_COMMENT("Entry:");
1412 array_overlap_test(nooverlap_target, 0);
1414 __ add(to, count, end_to); // offset after last copied element
1416 // for short arrays, just do single element copy
1417 __ cmp(count, 23); // 16 + 7
1418 __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
1419 __ delayed()->add(from, count, end_from);
1421 {
1422 // Align the ends of the arrays, since they can be unaligned even
1423 // when the arrays themselves are aligned.
1425 // copy bytes to align 'end_to' on 8 byte boundary
1426 __ andcc(end_to, 7, G1); // misaligned bytes
1427 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1428 __ delayed()->nop();
1429 __ sub(count, G1, count);
1430 __ BIND(L_align);
1431 __ dec(end_from);
1432 __ dec(end_to);
1433 __ ldub(end_from, 0, O3);
1434 __ deccc(G1);
1435 __ brx(Assembler::notZero, false, Assembler::pt, L_align);
1436 __ delayed()->stb(O3, end_to, 0);
1437 __ BIND(L_skip_alignment);
1438 }
1439 #ifdef _LP64
1440 if (aligned) {
1441 // Both arrays are aligned to 8-bytes in 64-bits VM.
1442 // The 'count' is decremented in copy_16_bytes_backward_with_shift()
1443 // in unaligned case.
1444 __ dec(count, 16);
1445 } else
1446 #endif
1447 {
1448 // Copy with shift 16 bytes per iteration if arrays do not have
1449 // the same alignment mod 8, otherwise jump to the next
1450 // code for aligned copy (and subtracting 16 from 'count' before the jump).
1451 // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
1452 // Also jump over aligned copy after the copy with shift completed.
1454 copy_16_bytes_backward_with_shift(end_from, end_to, count, 16,
1455 L_aligned_copy, L_copy_byte);
1456 }
1457 // copy 4 elements (16 bytes) at a time
1458 __ align(16);
1459 __ BIND(L_aligned_copy);
1460 __ dec(end_from, 16);
1461 __ ldx(end_from, 8, O3);
1462 __ ldx(end_from, 0, O4);
1463 __ dec(end_to, 16);
1464 __ deccc(count, 16);
1465 __ stx(O3, end_to, 8);
1466 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
1467 __ delayed()->stx(O4, end_to, 0);
1468 __ inc(count, 16);
1470 // copy 1 element (1 byte) at a time
1471 __ BIND(L_copy_byte);
1472 __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
1473 __ delayed()->nop();
1474 __ align(16);
1475 __ BIND(L_copy_byte_loop);
1476 __ dec(end_from);
1477 __ dec(end_to);
1478 __ ldub(end_from, 0, O4);
1479 __ deccc(count);
1480 __ brx(Assembler::greater, false, Assembler::pt, L_copy_byte_loop);
1481 __ delayed()->stb(O4, end_to, 0);
1483 __ BIND(L_exit);
1484 // O3, O4 are used as temp registers
1485 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
1486 __ retl();
1487 __ delayed()->mov(G0, O0); // return 0
1488 return start;
1489 }
1491 //
1492 // Generate stub for disjoint short copy. If "aligned" is true, the
1493 // "from" and "to" addresses are assumed to be heapword aligned.
1494 //
1495 // Arguments for generated stub:
1496 // from: O0
1497 // to: O1
1498 // count: O2 treated as signed
1499 //
1500 address generate_disjoint_short_copy(bool aligned, const char * name) {
1501 __ align(CodeEntryAlignment);
1502 StubCodeMark mark(this, "StubRoutines", name);
1503 address start = __ pc();
1505 Label L_skip_alignment, L_skip_alignment2;
1506 Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
1508 const Register from = O0; // source array address
1509 const Register to = O1; // destination array address
1510 const Register count = O2; // elements count
1511 const Register offset = O5; // offset from start of arrays
1512 // O3, O4, G3, G4 are used as temp registers
1514 assert_clean_int(count, O3); // Make sure 'count' is clean int.
1516 if (!aligned) disjoint_short_copy_entry = __ pc();
1517 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1518 if (!aligned) BLOCK_COMMENT("Entry:");
1520 // for short arrays, just do single element copy
1521 __ cmp(count, 11); // 8 + 3 (22 bytes)
1522 __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
1523 __ delayed()->mov(G0, offset);
1525 if (aligned) {
1526 // 'aligned' == true when it is known statically during compilation
1527 // of this arraycopy call site that both 'from' and 'to' addresses
1528 // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1529 //
1530 // Aligned arrays have 4-byte alignment in the 32-bit VM
1531 // and 8-byte alignment in the 64-bit VM.
1532 //
1533 #ifndef _LP64
1534 // copy a 2-element word if necessary to align 'to' to 8 bytes
1535 __ andcc(to, 7, G0);
1536 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1537 __ delayed()->ld(from, 0, O3);
1538 __ inc(from, 4);
1539 __ inc(to, 4);
1540 __ dec(count, 2);
1541 __ st(O3, to, -4);
1542 __ BIND(L_skip_alignment);
1543 #endif
1544 } else {
1545 // copy 1 element if necessary to align 'to' on a 4-byte boundary
1546 __ andcc(to, 3, G0);
1547 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1548 __ delayed()->lduh(from, 0, O3);
1549 __ inc(from, 2);
1550 __ inc(to, 2);
1551 __ dec(count);
1552 __ sth(O3, to, -2);
1553 __ BIND(L_skip_alignment);
1555 // copy 2 elements to align 'to' on an 8 byte boundary
1556 __ andcc(to, 7, G0);
1557 __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
1558 __ delayed()->lduh(from, 0, O3);
1559 __ dec(count, 2);
1560 __ lduh(from, 2, O4);
1561 __ inc(from, 4);
1562 __ inc(to, 4);
1563 __ sth(O3, to, -4);
1564 __ sth(O4, to, -2);
1565 __ BIND(L_skip_alignment2);
1566 }
1567 #ifdef _LP64
1568 if (!aligned)
1569 #endif
1570 {
1571 // Copy with shift 16 bytes per iteration if arrays do not have
1572 // the same alignment mod 8, otherwise fall through to the next
1573 // code for aligned copy.
1574 // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
1575 // Also jump over aligned copy after the copy with shift completed.
1577 copy_16_bytes_forward_with_shift(from, to, count, 8, L_copy_2_bytes);
1578 }
1580 // Both arrays are 8-byte aligned; copy 16 bytes at a time
1581 __ and3(count, 3, G4); // Save
1582 __ srl(count, 2, count);
1583 generate_disjoint_long_copy_core(aligned);
1584 __ mov(G4, count); // restore
1586 // copy 1 element at a time
1587 __ BIND(L_copy_2_bytes);
1588 __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
1589 __ delayed()->nop();
1590 __ align(16);
1591 __ BIND(L_copy_2_bytes_loop);
1592 __ lduh(from, offset, O3);
1593 __ deccc(count);
1594 __ sth(O3, to, offset);
1595 __ brx(Assembler::notZero, false, Assembler::pt, L_copy_2_bytes_loop);
1596 __ delayed()->inc(offset, 2);
1598 __ BIND(L_exit);
1599 // O3, O4 are used as temp registers
1600 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
1601 __ retl();
1602 __ delayed()->mov(G0, O0); // return 0
1603 return start;
1604 }
1606 //
1607 // Generate stub for conjoint short copy. If "aligned" is true, the
1608 // "from" and "to" addresses are assumed to be heapword aligned.
1609 //
1610 // Arguments for generated stub:
1611 // from: O0
1612 // to: O1
1613 // count: O2 treated as signed
1614 //
1615 address generate_conjoint_short_copy(bool aligned, const char * name) {
1616 // Do reverse copy.
1618 __ align(CodeEntryAlignment);
1619 StubCodeMark mark(this, "StubRoutines", name);
1620 address start = __ pc();
1621 address nooverlap_target = aligned ?
1622 StubRoutines::arrayof_jshort_disjoint_arraycopy() :
1623 disjoint_short_copy_entry;
1625 Label L_skip_alignment, L_skip_alignment2, L_aligned_copy;
1626 Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
1628 const Register from = O0; // source array address
1629 const Register to = O1; // destination array address
1630 const Register count = O2; // elements count
1631 const Register end_from = from; // source array end address
1632 const Register end_to = to; // destination array end address
1634 const Register byte_count = O3; // byte count to copy
1636 assert_clean_int(count, O3); // Make sure 'count' is clean int.
1638 if (!aligned) short_copy_entry = __ pc();
1639 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1640 if (!aligned) BLOCK_COMMENT("Entry:");
1642 array_overlap_test(nooverlap_target, 1);
1644 __ sllx(count, LogBytesPerShort, byte_count);
1645 __ add(to, byte_count, end_to); // offset after last copied element
1647 // for short arrays, just do single element copy
1648 __ cmp(count, 11); // 8 + 3 (22 bytes)
1649 __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
1650 __ delayed()->add(from, byte_count, end_from);
1652 {
1653 // Align the ends of the arrays, since they can be unaligned even
1654 // when the arrays themselves are aligned.
1656 // copy 1 element if necessary to align 'end_to' on a 4-byte boundary
1657 __ andcc(end_to, 3, G0);
1658 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1659 __ delayed()->lduh(end_from, -2, O3);
1660 __ dec(end_from, 2);
1661 __ dec(end_to, 2);
1662 __ dec(count);
1663 __ sth(O3, end_to, 0);
1664 __ BIND(L_skip_alignment);
1666 // copy 2 elements to align 'end_to' on an 8 byte boundary
1667 __ andcc(end_to, 7, G0);
1668 __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
1669 __ delayed()->lduh(end_from, -2, O3);
1670 __ dec(count, 2);
1671 __ lduh(end_from, -4, O4);
1672 __ dec(end_from, 4);
1673 __ dec(end_to, 4);
1674 __ sth(O3, end_to, 2);
1675 __ sth(O4, end_to, 0);
1676 __ BIND(L_skip_alignment2);
1677 }
1678 #ifdef _LP64
1679 if (aligned) {
1680 // Both arrays are aligned to 8-bytes in 64-bits VM.
1681 // The 'count' is decremented in copy_16_bytes_backward_with_shift()
1682 // in unaligned case.
1683 __ dec(count, 8);
1684 } else
1685 #endif
1686 {
1687 // Copy with shift, 16 bytes per iteration, if the arrays do not have
1688 // the same alignment mod 8; otherwise jump to the aligned-copy code
1689 // below (subtracting 8 from 'count' before the jump).
1690 // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
1691 // Also jump over the aligned copy once the copy with shift completes.
1693 copy_16_bytes_backward_with_shift(end_from, end_to, count, 8,
1694 L_aligned_copy, L_copy_2_bytes);
1695 }
1696 // copy 8 elements (16 bytes) at a time
1697 __ align(16);
1698 __ BIND(L_aligned_copy);
1699 __ dec(end_from, 16);
1700 __ ldx(end_from, 8, O3);
1701 __ ldx(end_from, 0, O4);
1702 __ dec(end_to, 16);
1703 __ deccc(count, 8);
1704 __ stx(O3, end_to, 8);
1705 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
1706 __ delayed()->stx(O4, end_to, 0);
1707 __ inc(count, 8);
1709 // copy 1 element (2 bytes) at a time
1710 __ BIND(L_copy_2_bytes);
1711 __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
1712 __ delayed()->nop();
1713 __ BIND(L_copy_2_bytes_loop);
1714 __ dec(end_from, 2);
1715 __ dec(end_to, 2);
1716 __ lduh(end_from, 0, O4);
1717 __ deccc(count);
1718 __ brx(Assembler::greater, false, Assembler::pt, L_copy_2_bytes_loop);
1719 __ delayed()->sth(O4, end_to, 0);
1721 __ BIND(L_exit);
1722 // O3, O4 are used as temp registers
1723 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
1724 __ retl();
1725 __ delayed()->mov(G0, O0); // return 0
1726 return start;
1727 }
1729 //
1730 // Generate core code for disjoint int copy (and oop copy on 32-bit).
1731 // If "aligned" is true, the "from" and "to" addresses are assumed
1732 // to be heapword aligned.
1733 //
1734 // Arguments:
1735 // from: O0
1736 // to: O1
1737 // count: O2 treated as signed
1738 //
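// Rough shape of the code below (illustrative pseudocode only):
//
//   if (count <= 5)        goto element_loop;          // tiny copy
//   if ((to & 7) != 0)     copy 1 element;             // align 'to' to 8
//   if ((from & 7) == 0)   copy element pairs as 8-byte longs;
//   else                   copy 16 bytes per iteration with shift-merge;
//   element_loop:          copy any remaining elements one at a time;
//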
1739 void generate_disjoint_int_copy_core(bool aligned) {
1741 Label L_skip_alignment, L_aligned_copy;
1742 Label L_copy_16_bytes, L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
1744 const Register from = O0; // source array address
1745 const Register to = O1; // destination array address
1746 const Register count = O2; // elements count
1747 const Register offset = O5; // offset from start of arrays
1748 // O3, O4, G3, G4 are used as temp registers
1750 // 'aligned' == true when it is known statically during compilation
1751 // of this arraycopy call site that both 'from' and 'to' addresses
1752 // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1753 //
1754 // Aligned arrays have 4-byte alignment in the 32-bit VM
1755 // and 8-byte alignment in the 64-bit VM.
1756 //
1757 #ifdef _LP64
1758 if (!aligned)
1759 #endif
1760 {
1761 // The next check could be put under 'ifndef' since the code in
1762 // generate_disjoint_long_copy_core() has its own checks and sets 'offset'.
1764 // for short arrays, just do single element copy
1765 __ cmp(count, 5); // 4 + 1 (20 bytes)
1766 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
1767 __ delayed()->mov(G0, offset);
1769 // copy 1 element to align 'to' on an 8-byte boundary
1770 __ andcc(to, 7, G0);
1771 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1772 __ delayed()->ld(from, 0, O3);
1773 __ inc(from, 4);
1774 __ inc(to, 4);
1775 __ dec(count);
1776 __ st(O3, to, -4);
1777 __ BIND(L_skip_alignment);
1779 // if arrays have same alignment mod 8, do 4 elements copy
1780 __ andcc(from, 7, G0);
1781 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
1782 __ delayed()->ld(from, 0, O3);
1784 //
1785 // Load 2 aligned 8-byte chunks and use one from the previous iteration
1786 // to form 2 aligned 8-byte chunks to store.
1787 //
1788 // copy_16_bytes_forward_with_shift() is not used here since this
1789 // specialized code is faster.
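// Merge arithmetic, roughly (SPARC is big-endian; illustrative only):
// with 'prev' holding the carried word in its low 32 bits and 'next'
// the following aligned 8-byte load, each stored doubleword is
//
//   store = (prev << 32) | (next >> 32);
//
// so every ldx/stx stays 8-byte aligned despite the 4-byte skew.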
1791 // copy with shift 4 elements (16 bytes) at a time
1792 __ dec(count, 4); // The cmp at the beginning guarantees count >= 4
1794 __ align(16);
1795 __ BIND(L_copy_16_bytes);
1796 __ ldx(from, 4, O4);
1797 __ deccc(count, 4); // Can we do next iteration after this one?
1798 __ ldx(from, 12, G4);
1799 __ inc(to, 16);
1800 __ inc(from, 16);
1801 __ sllx(O3, 32, O3);
1802 __ srlx(O4, 32, G3);
1803 __ bset(G3, O3);
1804 __ stx(O3, to, -16);
1805 __ sllx(O4, 32, O4);
1806 __ srlx(G4, 32, G3);
1807 __ bset(G3, O4);
1808 __ stx(O4, to, -8);
1809 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
1810 __ delayed()->mov(G4, O3);
1812 __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
1813 __ delayed()->inc(count, 4); // restore 'count'
1815 __ BIND(L_aligned_copy);
1816 }
1817 // copy 4 elements (16 bytes) at a time
1818 __ and3(count, 1, G4); // Save
1819 __ srl(count, 1, count);
1820 generate_disjoint_long_copy_core(aligned);
1821 __ mov(G4, count); // Restore
1823 // copy 1 element at a time
1824 __ BIND(L_copy_4_bytes);
1825 __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
1826 __ delayed()->nop();
1827 __ BIND(L_copy_4_bytes_loop);
1828 __ ld(from, offset, O3);
1829 __ deccc(count);
1830 __ st(O3, to, offset);
1831 __ brx(Assembler::notZero, false, Assembler::pt, L_copy_4_bytes_loop);
1832 __ delayed()->inc(offset, 4);
1833 __ BIND(L_exit);
1834 }
1836 //
1837 // Generate stub for disjoint int copy. If "aligned" is true, the
1838 // "from" and "to" addresses are assumed to be heapword aligned.
1839 //
1840 // Arguments for generated stub:
1841 // from: O0
1842 // to: O1
1843 // count: O2 treated as signed
1844 //
1845 address generate_disjoint_int_copy(bool aligned, const char * name) {
1846 __ align(CodeEntryAlignment);
1847 StubCodeMark mark(this, "StubRoutines", name);
1848 address start = __ pc();
1850 const Register count = O2;
1851 assert_clean_int(count, O3); // Make sure 'count' is clean int.
1853 if (!aligned) disjoint_int_copy_entry = __ pc();
1854 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1855 if (!aligned) BLOCK_COMMENT("Entry:");
1857 generate_disjoint_int_copy_core(aligned);
1859 // O3, O4 are used as temp registers
1860 inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
1861 __ retl();
1862 __ delayed()->mov(G0, O0); // return 0
1863 return start;
1864 }
1866 //
1867 // Generate core code for conjoint int copy (and oop copy on 32-bit).
1868 // If "aligned" is true, the "from" and "to" addresses are assumed
1869 // to be heapword aligned.
1870 //
1871 // Arguments:
1872 // from: O0
1873 // to: O1
1874 // count: O2 treated as signed
1875 //
1876 void generate_conjoint_int_copy_core(bool aligned) {
1877 // Do reverse copy.
1879 Label L_skip_alignment, L_aligned_copy;
1880 Label L_copy_16_bytes, L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
1882 const Register from = O0; // source array address
1883 const Register to = O1; // destination array address
1884 const Register count = O2; // elements count
1885 const Register end_from = from; // source array end address
1886 const Register end_to = to; // destination array end address
1887 // O3, O4, O5, G3 are used as temp registers
1889 const Register byte_count = O3; // bytes count to copy
1891 __ sllx(count, LogBytesPerInt, byte_count);
1892 __ add(to, byte_count, end_to); // offset after last copied element
1894 __ cmp(count, 5); // for short arrays, just do single element copy
1895 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
1896 __ delayed()->add(from, byte_count, end_from);
1898 // copy 1 element to align 'to' on an 8-byte boundary
1899 __ andcc(end_to, 7, G0);
1900 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1901 __ delayed()->nop();
1902 __ dec(count);
1903 __ dec(end_from, 4);
1904 __ dec(end_to, 4);
1905 __ ld(end_from, 0, O4);
1906 __ st(O4, end_to, 0);
1907 __ BIND(L_skip_alignment);
1909 // Check if 'end_from' and 'end_to' have the same alignment.
1910 __ andcc(end_from, 7, G0);
1911 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
1912 __ delayed()->dec(count, 4); // The cmp at the start guarantees count >= 4
1914 // copy with shift 4 elements (16 bytes) at a time
1915 //
1916 // Load 2 aligned 8-byte chunks and use one from the previous iteration
1917 // to form 2 aligned 8-byte chunks to store.
1918 //
1919 __ ldx(end_from, -4, O3);
1920 __ align(16);
1921 __ BIND(L_copy_16_bytes);
1922 __ ldx(end_from, -12, O4);
1923 __ deccc(count, 4);
1924 __ ldx(end_from, -20, O5);
1925 __ dec(end_to, 16);
1926 __ dec(end_from, 16);
1927 __ srlx(O3, 32, O3);
1928 __ sllx(O4, 32, G3);
1929 __ bset(G3, O3);
1930 __ stx(O3, end_to, 8);
1931 __ srlx(O4, 32, O4);
1932 __ sllx(O5, 32, G3);
1933 __ bset(O4, G3);
1934 __ stx(G3, end_to, 0);
1935 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
1936 __ delayed()->mov(O5, O3);
1938 __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
1939 __ delayed()->inc(count, 4);
1941 // copy 4 elements (16 bytes) at a time
1942 __ align(16);
1943 __ BIND(L_aligned_copy);
1944 __ dec(end_from, 16);
1945 __ ldx(end_from, 8, O3);
1946 __ ldx(end_from, 0, O4);
1947 __ dec(end_to, 16);
1948 __ deccc(count, 4);
1949 __ stx(O3, end_to, 8);
1950 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
1951 __ delayed()->stx(O4, end_to, 0);
1952 __ inc(count, 4);
1954 // copy 1 element (4 bytes) at a time
1955 __ BIND(L_copy_4_bytes);
1956 __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
1957 __ delayed()->nop();
1958 __ BIND(L_copy_4_bytes_loop);
1959 __ dec(end_from, 4);
1960 __ dec(end_to, 4);
1961 __ ld(end_from, 0, O4);
1962 __ deccc(count);
1963 __ brx(Assembler::greater, false, Assembler::pt, L_copy_4_bytes_loop);
1964 __ delayed()->st(O4, end_to, 0);
1965 __ BIND(L_exit);
1966 }
1968 //
1969 // Generate stub for conjoint int copy. If "aligned" is true, the
1970 // "from" and "to" addresses are assumed to be heapword aligned.
1971 //
1972 // Arguments for generated stub:
1973 // from: O0
1974 // to: O1
1975 // count: O2 treated as signed
1976 //
1977 address generate_conjoint_int_copy(bool aligned, const char * name) {
1978 __ align(CodeEntryAlignment);
1979 StubCodeMark mark(this, "StubRoutines", name);
1980 address start = __ pc();
1982 address nooverlap_target = aligned ?
1983 StubRoutines::arrayof_jint_disjoint_arraycopy() :
1984 disjoint_int_copy_entry;
1986 assert_clean_int(O2, O3); // Make sure 'count' is clean int.
1988 if (!aligned) int_copy_entry = __ pc();
1989 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1990 if (!aligned) BLOCK_COMMENT("Entry:");
1992 array_overlap_test(nooverlap_target, 2);
1994 generate_conjoint_int_copy_core(aligned);
1996 // O3, O4 are used as temp registers
1997 inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
1998 __ retl();
1999 __ delayed()->mov(G0, O0); // return 0
2000 return start;
2001 }
2003 //
2004 // Generate core code for disjoint long copy (and oop copy on 64-bit).
2005 // "aligned" is ignored, because we must make the stronger
2006 // assumption that both addresses are always 64-bit aligned.
2007 //
2008 // Arguments:
2009 // from: O0
2010 // to: O1
2011 // count: O2 treated as signed
2012 //
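// In effect (illustrative pseudocode only):
//
//   for (i = 0; i + 2 <= count; i += 2) {  // 2-way unrolled, 16 bytes/iter
//     to[i]   = from[i];
//     to[i+1] = from[i+1];
//   }
//   if (count & 1)  to[i] = from[i];       // odd trailing element
//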
2013 void generate_disjoint_long_copy_core(bool aligned) {
2014 Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
2015 const Register from = O0; // source array address
2016 const Register to = O1; // destination array address
2017 const Register count = O2; // elements count
2018 const Register offset0 = O4; // element offset
2019 const Register offset8 = O5; // next element offset
2021 __ deccc(count, 2);
2022 __ mov(G0, offset0); // offset from start of arrays (0)
2023 __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
2024 __ delayed()->add(offset0, 8, offset8);
2025 __ align(16);
2026 __ BIND(L_copy_16_bytes);
2027 __ ldx(from, offset0, O3);
2028 __ ldx(from, offset8, G3);
2029 __ deccc(count, 2);
2030 __ stx(O3, to, offset0);
2031 __ inc(offset0, 16);
2032 __ stx(G3, to, offset8);
2033 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
2034 __ delayed()->inc(offset8, 16);
2036 __ BIND(L_copy_8_bytes);
2037 __ inccc(count, 2);
2038 __ brx(Assembler::zero, true, Assembler::pn, L_exit );
2039 __ delayed()->mov(offset0, offset8); // Set O5 used by other stubs
2040 __ ldx(from, offset0, O3);
2041 __ stx(O3, to, offset0);
2042 __ BIND(L_exit);
2043 }
2045 //
2046 // Generate stub for disjoint long copy.
2047 // "aligned" is ignored, because we must make the stronger
2048 // assumption that both addresses are always 64-bit aligned.
2049 //
2050 // Arguments for generated stub:
2051 // from: O0
2052 // to: O1
2053 // count: O2 treated as signed
2054 //
2055 address generate_disjoint_long_copy(bool aligned, const char * name) {
2056 __ align(CodeEntryAlignment);
2057 StubCodeMark mark(this, "StubRoutines", name);
2058 address start = __ pc();
2060 assert_clean_int(O2, O3); // Make sure 'count' is clean int.
2062 if (!aligned) disjoint_long_copy_entry = __ pc();
2063 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2064 if (!aligned) BLOCK_COMMENT("Entry:");
2066 generate_disjoint_long_copy_core(aligned);
2068 // O3, O4 are used as temp registers
2069 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
2070 __ retl();
2071 __ delayed()->mov(G0, O0); // return 0
2072 return start;
2073 }
2075 //
2076 // Generate core code for conjoint long copy (and oop copy on 64-bit).
2077 // "aligned" is ignored, because we must make the stronger
2078 // assumption that both addresses are always 64-bit aligned.
2079 //
2080 // Arguments:
2081 // from: O0
2082 // to: O1
2083 // count: O2 treated as signed
2084 //
2085 void generate_conjoint_long_copy_core(bool aligned) {
2086 // Do reverse copy.
2087 Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
2088 const Register from = O0; // source array address
2089 const Register to = O1; // destination array address
2090 const Register count = O2; // elements count
2091 const Register offset8 = O4; // element offset
2092 const Register offset0 = O5; // previous element offset
2094 __ subcc(count, 1, count);
2095 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes );
2096 __ delayed()->sllx(count, LogBytesPerLong, offset8);
2097 __ sub(offset8, 8, offset0);
2098 __ align(16);
2099 __ BIND(L_copy_16_bytes);
2100 __ ldx(from, offset8, O2);
2101 __ ldx(from, offset0, O3);
2102 __ stx(O2, to, offset8);
2103 __ deccc(offset8, 16); // use offset8 as counter
2104 __ stx(O3, to, offset0);
2105 __ brx(Assembler::greater, false, Assembler::pt, L_copy_16_bytes);
2106 __ delayed()->dec(offset0, 16);
2108 __ BIND(L_copy_8_bytes);
2109 __ brx(Assembler::negative, false, Assembler::pn, L_exit );
2110 __ delayed()->nop();
2111 __ ldx(from, 0, O3);
2112 __ stx(O3, to, 0);
2113 __ BIND(L_exit);
2114 }
2116 // Generate stub for conjoint long copy.
2117 // "aligned" is ignored, because we must make the stronger
2118 // assumption that both addresses are always 64-bit aligned.
2119 //
2120 // Arguments for generated stub:
2121 // from: O0
2122 // to: O1
2123 // count: O2 treated as signed
2124 //
2125 address generate_conjoint_long_copy(bool aligned, const char * name) {
2126 __ align(CodeEntryAlignment);
2127 StubCodeMark mark(this, "StubRoutines", name);
2128 address start = __ pc();
2130 assert(!aligned, "usage");
2131 address nooverlap_target = disjoint_long_copy_entry;
2133 assert_clean_int(O2, O3); // Make sure 'count' is clean int.
2135 if (!aligned) long_copy_entry = __ pc();
2136 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2137 if (!aligned) BLOCK_COMMENT("Entry:");
2139 array_overlap_test(nooverlap_target, 3);
2141 generate_conjoint_long_copy_core(aligned);
2143 // O3, O4 are used as temp registers
2144 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
2145 __ retl();
2146 __ delayed()->mov(G0, O0); // return 0
2147 return start;
2148 }
2150 // Generate stub for disjoint oop copy. If "aligned" is true, the
2151 // "from" and "to" addresses are assumed to be heapword aligned.
2152 //
2153 // Arguments for generated stub:
2154 // from: O0
2155 // to: O1
2156 // count: O2 treated as signed
2157 //
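// Structure, roughly (editorial sketch): an oop copy is a primitive copy
// bracketed by GC write barriers,
//
//   gen_write_ref_array_pre_barrier(to, count);
//   copy elements (32-bit slots if UseCompressedOops, else 64-bit);
//   gen_write_ref_array_post_barrier(to, count, temp);
//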
2158 address generate_disjoint_oop_copy(bool aligned, const char * name) {
2160 const Register from = O0; // source array address
2161 const Register to = O1; // destination array address
2162 const Register count = O2; // elements count
2164 __ align(CodeEntryAlignment);
2165 StubCodeMark mark(this, "StubRoutines", name);
2166 address start = __ pc();
2168 assert_clean_int(count, O3); // Make sure 'count' is clean int.
2170 if (!aligned) disjoint_oop_copy_entry = __ pc();
2171 // caller can pass a 64-bit byte count here
2172 if (!aligned) BLOCK_COMMENT("Entry:");
2174 // save arguments for barrier generation
2175 __ mov(to, G1);
2176 __ mov(count, G5);
2177 gen_write_ref_array_pre_barrier(G1, G5);
2178 #ifdef _LP64
2179 assert_clean_int(count, O3); // Make sure 'count' is clean int.
2180 if (UseCompressedOops) {
2181 generate_disjoint_int_copy_core(aligned);
2182 } else {
2183 generate_disjoint_long_copy_core(aligned);
2184 }
2185 #else
2186 generate_disjoint_int_copy_core(aligned);
2187 #endif
2188 // O0 is used as temp register
2189 gen_write_ref_array_post_barrier(G1, G5, O0);
2191 // O3, O4 are used as temp registers
2192 inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2193 __ retl();
2194 __ delayed()->mov(G0, O0); // return 0
2195 return start;
2196 }
2198 // Generate stub for conjoint oop copy. If "aligned" is true, the
2199 // "from" and "to" addresses are assumed to be heapword aligned.
2200 //
2201 // Arguments for generated stub:
2202 // from: O0
2203 // to: O1
2204 // count: O2 treated as signed
2205 //
2206 address generate_conjoint_oop_copy(bool aligned, const char * name) {
2208 const Register from = O0; // source array address
2209 const Register to = O1; // destination array address
2210 const Register count = O2; // elements count
2212 __ align(CodeEntryAlignment);
2213 StubCodeMark mark(this, "StubRoutines", name);
2214 address start = __ pc();
2216 assert_clean_int(count, O3); // Make sure 'count' is clean int.
2218 if (!aligned) oop_copy_entry = __ pc();
2219 // caller can pass a 64-bit byte count here
2220 if (!aligned) BLOCK_COMMENT("Entry:");
2222 // save arguments for barrier generation
2223 __ mov(to, G1);
2224 __ mov(count, G5);
2226 gen_write_ref_array_pre_barrier(G1, G5);
2228 address nooverlap_target = aligned ?
2229 StubRoutines::arrayof_oop_disjoint_arraycopy() :
2230 disjoint_oop_copy_entry;
2232 array_overlap_test(nooverlap_target, LogBytesPerHeapOop);
2234 #ifdef _LP64
2235 if (UseCompressedOops) {
2236 generate_conjoint_int_copy_core(aligned);
2237 } else {
2238 generate_conjoint_long_copy_core(aligned);
2239 }
2240 #else
2241 generate_conjoint_int_copy_core(aligned);
2242 #endif
2244 // O0 is used as temp register
2245 gen_write_ref_array_post_barrier(G1, G5, O0);
2247 // O3, O4 are used as temp registers
2248 inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2249 __ retl();
2250 __ delayed()->mov(G0, O0); // return 0
2251 return start;
2252 }
2255 // Helper for generating a dynamic type check.
2256 // Smashes only the given temp registers.
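// Outline (illustrative pseudocode; the real work is done by the
// MacroAssembler's check_klass_subtype_fast_path/slow_path):
//
//   if (*(sub_klass + super_check_offset) == super_klass)  goto L_success;
//   else scan sub_klass's secondary supers in a saved frame;
//   fall through on failure;
//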
2257 void generate_type_check(Register sub_klass,
2258 Register super_check_offset,
2259 Register super_klass,
2260 Register temp,
2261 Label& L_success) {
2262 assert_different_registers(sub_klass, super_check_offset, super_klass, temp);
2264 BLOCK_COMMENT("type_check:");
2266 Label L_miss, L_pop_to_miss;
2268 assert_clean_int(super_check_offset, temp);
2270 __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, noreg,
2271 &L_success, &L_miss, NULL,
2272 super_check_offset);
2274 BLOCK_COMMENT("type_check_slow_path:");
2275 __ save_frame(0);
2276 __ check_klass_subtype_slow_path(sub_klass->after_save(),
2277 super_klass->after_save(),
2278 L0, L1, L2, L4,
2279 NULL, &L_pop_to_miss);
2280 __ ba(false, L_success);
2281 __ delayed()->restore();
2283 __ bind(L_pop_to_miss);
2284 __ restore();
2286 // Fall through on failure!
2287 __ BIND(L_miss);
2288 }
2291 // Generate stub for checked oop copy.
2292 //
2293 // Arguments for generated stub:
2294 // from: O0
2295 // to: O1
2296 // count: O2 treated as signed
2297 // ckoff: O3 (super_check_offset)
2298 // ckval: O4 (super_klass)
2299 // ret: O0 zero for success; (-1^K) where K is partial transfer count
2300 //
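// Example of the return encoding (editorial note): if K elements were
// copied before a type check failed, the stub returns -1^K, i.e. ~K;
// e.g. K == 2 yields ~2 == -3, and the caller recovers K as ~O0.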
2301 address generate_checkcast_copy(const char* name) {
2303 const Register O0_from = O0; // source array address
2304 const Register O1_to = O1; // destination array address
2305 const Register O2_count = O2; // elements count
2306 const Register O3_ckoff = O3; // super_check_offset
2307 const Register O4_ckval = O4; // super_klass
2309 const Register O5_offset = O5; // loop var, with stride wordSize
2310 const Register G1_remain = G1; // loop var, with stride -1
2311 const Register G3_oop = G3; // actual oop copied
2312 const Register G4_klass = G4; // oop._klass
2313 const Register G5_super = G5; // oop._klass._primary_supers[ckval]
2315 __ align(CodeEntryAlignment);
2316 StubCodeMark mark(this, "StubRoutines", name);
2317 address start = __ pc();
2319 gen_write_ref_array_pre_barrier(O1, O2);
2321 #ifdef ASSERT
2322 // We sometimes save a frame (see generate_type_check below).
2323 // If this will cause trouble, let's fail now instead of later.
2324 __ save_frame(0);
2325 __ restore();
2326 #endif
2328 #ifdef ASSERT
2329 // caller guarantees that the arrays really are different
2330 // otherwise, we would have to make conjoint checks
2331 { Label L;
2332 __ mov(O3, G1); // spill: overlap test smashes O3
2333 __ mov(O4, G4); // spill: overlap test smashes O4
2334 array_overlap_test(L, LogBytesPerHeapOop);
2335 __ stop("checkcast_copy within a single array");
2336 __ bind(L);
2337 __ mov(G1, O3);
2338 __ mov(G4, O4);
2339 }
2340 #endif //ASSERT
2342 assert_clean_int(O2_count, G1); // Make sure 'count' is clean int.
2344 checkcast_copy_entry = __ pc();
2345 // caller can pass a 64-bit byte count here (from generic stub)
2346 BLOCK_COMMENT("Entry:");
2348 Label load_element, store_element, do_card_marks, fail, done;
2349 __ addcc(O2_count, 0, G1_remain); // initialize loop index, and test it
2350 __ brx(Assembler::notZero, false, Assembler::pt, load_element);
2351 __ delayed()->mov(G0, O5_offset); // offset from start of arrays
2353 // Empty array: Nothing to do.
2354 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
2355 __ retl();
2356 __ delayed()->set(0, O0); // return 0 on (trivial) success
2358 // ======== begin loop ========
2359 // (Loop is rotated; its entry is load_element.)
2360 // Loop variables:
2361 // (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays
2362 // (O2 = len; O2 != 0; O2--) --- number of oops *remaining*
2363 // G3, G4, G5 --- current oop, oop.klass, oop.klass.super
2364 __ align(16);
2366 __ BIND(store_element);
2367 __ deccc(G1_remain); // decrement the count
2368 __ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop
2369 __ inc(O5_offset, heapOopSize); // step to next offset
2370 __ brx(Assembler::zero, true, Assembler::pt, do_card_marks);
2371 __ delayed()->set(0, O0); // return 0 on success
2373 // ======== loop entry is here ========
2374 __ BIND(load_element);
2375 __ load_heap_oop(O0_from, O5_offset, G3_oop); // load the oop
2376 __ br_null(G3_oop, true, Assembler::pt, store_element);
2377 __ delayed()->nop();
2379 __ load_klass(G3_oop, G4_klass); // query the object klass
2381 generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super,
2382 // branch to this on success:
2383 store_element);
2384 // ======== end loop ========
2386 // It was a real error; we must depend on the caller to finish the job.
2387 // Register G1 has number of *remaining* oops, O2 number of *total* oops.
2388 // Emit GC store barriers for the oops we have copied (O2 minus G1),
2389 // and report their number to the caller.
2390 __ BIND(fail);
2391 __ subcc(O2_count, G1_remain, O2_count);
2392 __ brx(Assembler::zero, false, Assembler::pt, done);
2393 __ delayed()->not1(O2_count, O0); // report (-1^K) to caller
2395 __ BIND(do_card_marks);
2396 gen_write_ref_array_post_barrier(O1_to, O2_count, O3); // store check on O1[0..O2]
2398 __ BIND(done);
2399 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
2400 __ retl();
2401 __ delayed()->nop(); // return value in O0
2403 return start;
2404 }
2407 // Generate 'unsafe' array copy stub
2408 // Though just as safe as the other stubs, it takes an unscaled
2409 // size_t argument instead of an element count.
2410 //
2411 // Arguments for generated stub:
2412 // from: O0
2413 // to: O1
2414 // count: O2 byte count, treated as ssize_t, can be zero
2415 //
2416 // Examines the alignment of the operands and dispatches
2417 // to a long, int, short, or byte copy loop.
2418 //
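// Dispatch logic, roughly (illustrative pseudocode only):
//
//   bits = from | to | count;
//   if      ((bits & (BytesPerLong -1)) == 0)  jlong_copy (count >> 3);
//   else if ((bits & (BytesPerInt  -1)) == 0)  jint_copy  (count >> 2);
//   else if ((bits & (BytesPerShort-1)) == 0)  jshort_copy(count >> 1);
//   else                                       jbyte_copy (count);
//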
2419 address generate_unsafe_copy(const char* name) {
2421 const Register O0_from = O0; // source array address
2422 const Register O1_to = O1; // destination array address
2423 const Register O2_count = O2; // elements count
2425 const Register G1_bits = G1; // test copy of low bits
2427 __ align(CodeEntryAlignment);
2428 StubCodeMark mark(this, "StubRoutines", name);
2429 address start = __ pc();
2431 // bump this on entry, not on exit:
2432 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, G1, G3);
2434 __ or3(O0_from, O1_to, G1_bits);
2435 __ or3(O2_count, G1_bits, G1_bits);
2437 __ btst(BytesPerLong-1, G1_bits);
2438 __ br(Assembler::zero, true, Assembler::pt,
2439 long_copy_entry, relocInfo::runtime_call_type);
2440 // scale the count on the way out:
2441 __ delayed()->srax(O2_count, LogBytesPerLong, O2_count);
2443 __ btst(BytesPerInt-1, G1_bits);
2444 __ br(Assembler::zero, true, Assembler::pt,
2445 int_copy_entry, relocInfo::runtime_call_type);
2446 // scale the count on the way out:
2447 __ delayed()->srax(O2_count, LogBytesPerInt, O2_count);
2449 __ btst(BytesPerShort-1, G1_bits);
2450 __ br(Assembler::zero, true, Assembler::pt,
2451 short_copy_entry, relocInfo::runtime_call_type);
2452 // scale the count on the way out:
2453 __ delayed()->srax(O2_count, LogBytesPerShort, O2_count);
2455 __ br(Assembler::always, false, Assembler::pt,
2456 byte_copy_entry, relocInfo::runtime_call_type);
2457 __ delayed()->nop();
2459 return start;
2460 }
2463 // Perform range checks on the proposed arraycopy.
2464 // Kills the two temps, but nothing else.
2465 // Also cleans the high (sign) bits of src_pos and dst_pos.
2466 void arraycopy_range_checks(Register src, // source array oop (O0)
2467 Register src_pos, // source position (O1)
2468 Register dst, // destination array oop (O2)
2469 Register dst_pos, // destination position (O3)
2470 Register length, // length of copy (O4)
2471 Register temp1, Register temp2,
2472 Label& L_failed) {
2473 BLOCK_COMMENT("arraycopy_range_checks:");
2475 // if (src_pos + length > arrayOop(src)->length() ) FAIL;
2477 const Register array_length = temp1; // scratch
2478 const Register end_pos = temp2; // scratch
2480 // Note: This next instruction may be in the delay slot of a branch:
2481 __ add(length, src_pos, end_pos); // src_pos + length
2482 __ lduw(src, arrayOopDesc::length_offset_in_bytes(), array_length);
2483 __ cmp(end_pos, array_length);
2484 __ br(Assembler::greater, false, Assembler::pn, L_failed);
2486 // if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
2487 __ delayed()->add(length, dst_pos, end_pos); // dst_pos + length
2488 __ lduw(dst, arrayOopDesc::length_offset_in_bytes(), array_length);
2489 __ cmp(end_pos, array_length);
2490 __ br(Assembler::greater, false, Assembler::pn, L_failed);
2492 // We have to clean up the high 32 bits of 'src_pos' and 'dst_pos'.
2493 // A move with sign extension can be used since they are positive.
2494 __ delayed()->signx(src_pos, src_pos);
2495 __ signx(dst_pos, dst_pos);
2497 BLOCK_COMMENT("arraycopy_range_checks done");
2498 }
2501 //
2502 // Generate generic array copy stubs
2503 //
2504 // Input:
2505 // O0 - src oop
2506 // O1 - src_pos
2507 // O2 - dst oop
2508 // O3 - dst_pos
2509 // O4 - element count
2510 //
2511 // Output:
2512 // O0 == 0 - success
2513 // O0 == -1 - need to call System.arraycopy
2514 //
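// Caller contract, roughly (editorial sketch): compiled code tries this
// stub first and falls back on failure,
//
//   if (generic_arraycopy(src, src_pos, dst, dst_pos, length) != 0)
//     call the full System.arraycopy runtime path;
//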
2515 address generate_generic_copy(const char *name) {
2517 Label L_failed, L_objArray;
2519 // Input registers
2520 const Register src = O0; // source array oop
2521 const Register src_pos = O1; // source position
2522 const Register dst = O2; // destination array oop
2523 const Register dst_pos = O3; // destination position
2524 const Register length = O4; // elements count
2526 // registers used as temp
2527 const Register G3_src_klass = G3; // source array klass
2528 const Register G4_dst_klass = G4; // destination array klass
2529 const Register G5_lh = G5; // layout helper
2530 const Register O5_temp = O5;
2532 __ align(CodeEntryAlignment);
2533 StubCodeMark mark(this, "StubRoutines", name);
2534 address start = __ pc();
2536 // bump this on entry, not on exit:
2537 inc_counter_np(SharedRuntime::_generic_array_copy_ctr, G1, G3);
2539 // In principle, the int arguments could be dirty.
2540 //assert_clean_int(src_pos, G1);
2541 //assert_clean_int(dst_pos, G1);
2542 //assert_clean_int(length, G1);
2544 //-----------------------------------------------------------------------
2545 // Assembler stubs will be used for this call to arraycopy
2546 // if the following conditions are met:
2547 //
2548 // (1) src and dst must not be null.
2549 // (2) src_pos must not be negative.
2550 // (3) dst_pos must not be negative.
2551 // (4) length must not be negative.
2552 // (5) src klass and dst klass should be the same and not NULL.
2553 // (6) src and dst should be arrays.
2554 // (7) src_pos + length must not exceed length of src.
2555 // (8) dst_pos + length must not exceed length of dst.
2556 BLOCK_COMMENT("arraycopy initial argument checks");
2558 // if (src == NULL) return -1;
2559 __ br_null(src, false, Assembler::pn, L_failed);
2561 // if (src_pos < 0) return -1;
2562 __ delayed()->tst(src_pos);
2563 __ br(Assembler::negative, false, Assembler::pn, L_failed);
2564 __ delayed()->nop();
2566 // if (dst == NULL) return -1;
2567 __ br_null(dst, false, Assembler::pn, L_failed);
2569 // if (dst_pos < 0) return -1;
2570 __ delayed()->tst(dst_pos);
2571 __ br(Assembler::negative, false, Assembler::pn, L_failed);
2573 // if (length < 0) return -1;
2574 __ delayed()->tst(length);
2575 __ br(Assembler::negative, false, Assembler::pn, L_failed);
2577 BLOCK_COMMENT("arraycopy argument klass checks");
2578 // get src->klass()
2579 if (UseCompressedOops) {
2580 __ delayed()->nop(); // ??? not good
2581 __ load_klass(src, G3_src_klass);
2582 } else {
2583 __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass);
2584 }
2586 #ifdef ASSERT
2587 // assert(src->klass() != NULL);
2588 BLOCK_COMMENT("assert klasses not null");
2589 { Label L_a, L_b;
2590 __ br_notnull(G3_src_klass, false, Assembler::pt, L_b); // it is broken if klass is NULL
2591 __ delayed()->nop();
2592 __ bind(L_a);
2593 __ stop("broken null klass");
2594 __ bind(L_b);
2595 __ load_klass(dst, G4_dst_klass);
2596 __ br_null(G4_dst_klass, false, Assembler::pn, L_a); // this would be broken also
2597 __ delayed()->mov(G0, G4_dst_klass); // scribble the temp
2598 BLOCK_COMMENT("assert done");
2599 }
2600 #endif
2602 // Load layout helper
2603 //
2604 // |array_tag| | header_size | element_type | |log2_element_size|
2605 // 32 30 24 16 8 2 0
2606 //
2607 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2608 //
2610 int lh_offset = klassOopDesc::header_size() * HeapWordSize +
2611 Klass::layout_helper_offset_in_bytes();
2613 // Load the 32-bit signed value. Use the br() instruction with it to check icc.
2614 __ lduw(G3_src_klass, lh_offset, G5_lh);
2616 if (UseCompressedOops) {
2617 __ load_klass(dst, G4_dst_klass);
2618 }
2619 // Handle objArrays completely differently...
2620 juint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2621 __ set(objArray_lh, O5_temp);
2622 __ cmp(G5_lh, O5_temp);
2623 __ br(Assembler::equal, false, Assembler::pt, L_objArray);
2624 if (UseCompressedOops) {
2625 __ delayed()->nop();
2626 } else {
2627 __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
2628 }
2630 // if (src->klass() != dst->klass()) return -1;
2631 __ cmp(G3_src_klass, G4_dst_klass);
2632 __ brx(Assembler::notEqual, false, Assembler::pn, L_failed);
2633 __ delayed()->nop();
2635 // if (!src->is_Array()) return -1;
2636 __ cmp(G5_lh, Klass::_lh_neutral_value); // < 0
2637 __ br(Assembler::greaterEqual, false, Assembler::pn, L_failed);
2639 // At this point, it is known to be a typeArray (array_tag 0x3).
2640 #ifdef ASSERT
2641 __ delayed()->nop();
2642 { Label L;
2643 jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2644 __ set(lh_prim_tag_in_place, O5_temp);
2645 __ cmp(G5_lh, O5_temp);
2646 __ br(Assembler::greaterEqual, false, Assembler::pt, L);
2647 __ delayed()->nop();
2648 __ stop("must be a primitive array");
2649 __ bind(L);
2650 }
2651 #else
2652 __ delayed(); // match next insn to prev branch
2653 #endif
2655 arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2656 O5_temp, G4_dst_klass, L_failed);
2658 // typeArrayKlass
2659 //
2660 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2661 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2662 //
2664 const Register G4_offset = G4_dst_klass; // array offset
2665 const Register G3_elsize = G3_src_klass; // log2 element size
2667 __ srl(G5_lh, Klass::_lh_header_size_shift, G4_offset);
2668 __ and3(G4_offset, Klass::_lh_header_size_mask, G4_offset); // array_offset
2669 __ add(src, G4_offset, src); // src array offset
2670 __ add(dst, G4_offset, dst); // dst array offset
2671 __ and3(G5_lh, Klass::_lh_log2_element_size_mask, G3_elsize); // log2 element size
2673 // The next registers should be set before the jump to the corresponding stub.
2674 const Register from = O0; // source array address
2675 const Register to = O1; // destination array address
2676 const Register count = O2; // elements count
2678 // The 'from', 'to', 'count' registers must be set in this order
2679 // since they alias 'src', 'src_pos', 'dst'.
2681 BLOCK_COMMENT("scale indexes to element size");
2682 __ sll_ptr(src_pos, G3_elsize, src_pos);
2683 __ sll_ptr(dst_pos, G3_elsize, dst_pos);
2684 __ add(src, src_pos, from); // src_addr
2685 __ add(dst, dst_pos, to); // dst_addr
2687 BLOCK_COMMENT("choose copy loop based on element size");
2688 __ cmp(G3_elsize, 0);
2689 __ br(Assembler::equal,true,Assembler::pt,StubRoutines::_jbyte_arraycopy);
2690 __ delayed()->signx(length, count); // length
2692 __ cmp(G3_elsize, LogBytesPerShort);
2693 __ br(Assembler::equal,true,Assembler::pt,StubRoutines::_jshort_arraycopy);
2694 __ delayed()->signx(length, count); // length
2696 __ cmp(G3_elsize, LogBytesPerInt);
2697 __ br(Assembler::equal,true,Assembler::pt,StubRoutines::_jint_arraycopy);
2698 __ delayed()->signx(length, count); // length
2699 #ifdef ASSERT
2700 { Label L;
2701 __ cmp(G3_elsize, LogBytesPerLong);
2702 __ br(Assembler::equal, false, Assembler::pt, L);
2703 __ delayed()->nop();
2704 __ stop("must be long copy, but elsize is wrong");
2705 __ bind(L);
2706 }
2707 #endif
2708 __ br(Assembler::always,false,Assembler::pt,StubRoutines::_jlong_arraycopy);
2709 __ delayed()->signx(length, count); // length
2711 // objArrayKlass
2712 __ BIND(L_objArray);
2713 // live at this point: G3_src_klass, G4_dst_klass, src[_pos], dst[_pos], length
2715 Label L_plain_copy, L_checkcast_copy;
2716 // test array classes for subtyping
2717 __ cmp(G3_src_klass, G4_dst_klass); // usual case is exact equality
2718 __ brx(Assembler::notEqual, true, Assembler::pn, L_checkcast_copy);
2719 __ delayed()->lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted from below
2721 // Identically typed arrays can be copied without element-wise checks.
2722 arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2723 O5_temp, G5_lh, L_failed);
2725 __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
2726 __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
2727 __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
2728 __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
2729 __ add(src, src_pos, from); // src_addr
2730 __ add(dst, dst_pos, to); // dst_addr
2731 __ BIND(L_plain_copy);
2732 __ br(Assembler::always, false, Assembler::pt,StubRoutines::_oop_arraycopy);
2733 __ delayed()->signx(length, count); // length
2735 __ BIND(L_checkcast_copy);
2736 // live at this point: G3_src_klass, G4_dst_klass
2737 {
2738 // Before looking at dst.length, make sure dst is also an objArray.
2739 // lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted to delay slot
2740 __ cmp(G5_lh, O5_temp);
2741 __ br(Assembler::notEqual, false, Assembler::pn, L_failed);
2743 // It is safe to examine both src.length and dst.length.
2744 __ delayed(); // match next insn to prev branch
2745 arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2746 O5_temp, G5_lh, L_failed);
2748 // Marshal the base address arguments now, freeing registers.
2749 __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
2750 __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
2751 __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
2752 __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
2753 __ add(src, src_pos, from); // src_addr
2754 __ add(dst, dst_pos, to); // dst_addr
2755 __ signx(length, count); // length (reloaded)
2757 Register sco_temp = O3; // this register is free now
2758 assert_different_registers(from, to, count, sco_temp,
2759 G4_dst_klass, G3_src_klass);
2761 // Generate the type check.
2762 int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
2763 Klass::super_check_offset_offset_in_bytes());
2764 __ lduw(G4_dst_klass, sco_offset, sco_temp);
2765 generate_type_check(G3_src_klass, sco_temp, G4_dst_klass,
2766 O5_temp, L_plain_copy);
2768 // Fetch destination element klass from the objArrayKlass header.
2769 int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
2770 objArrayKlass::element_klass_offset_in_bytes());
2772 // the checkcast_copy loop needs two extra arguments:
2773 __ ld_ptr(G4_dst_klass, ek_offset, O4); // dest elem klass
2774 // lduw(O4, sco_offset, O3); // sco of elem klass
2776 __ br(Assembler::always, false, Assembler::pt, checkcast_copy_entry);
2777 __ delayed()->lduw(O4, sco_offset, O3);
2778 }
2780 __ BIND(L_failed);
2781 __ retl();
2782 __ delayed()->sub(G0, 1, O0); // return -1
2783 return start;
2784 }
2786 void generate_arraycopy_stubs() {
2788 // Note: the disjoint stubs must be generated first, some of
2789 // the conjoint stubs use them.
2790 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
2791 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
2792 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
2793 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy");
2794 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy");
2795 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy");
2796 StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
2797 StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy");
2798 StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy");
2799 StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy");
2801 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
2802 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy");
2803 StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, "jint_arraycopy");
2804 StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy(false, "jlong_arraycopy");
2805 StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(false, "oop_arraycopy");
2806 StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy");
2807 StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
2808 #ifdef _LP64
2809 // since sizeof(jint) < sizeof(HeapWord), there's a different flavor:
2810 StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy");
2811 #else
2812 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy;
2813 #endif
2814 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;
2815 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;
2817 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
2818 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy");
2819 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy");
2820 }
2822 void generate_initial() {
2823 // Generates all stubs and initializes the entry points
2825 //------------------------------------------------------------------------------------------------------------------------
2826 // entry points that exist in all platforms
2827 // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
2828 // the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
2829 StubRoutines::_forward_exception_entry = generate_forward_exception();
2831 StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address);
2832 StubRoutines::_catch_exception_entry = generate_catch_exception();
2834 //------------------------------------------------------------------------------------------------------------------------
2835 // entry points that are platform specific
2836 StubRoutines::Sparc::_test_stop_entry = generate_test_stop();
2838 StubRoutines::Sparc::_stop_subroutine_entry = generate_stop_subroutine();
2839 StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows();
2841 #if !defined(COMPILER2) && !defined(_LP64)
2842 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
2843 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
2844 StubRoutines::_atomic_add_entry = generate_atomic_add();
2845 StubRoutines::_atomic_xchg_ptr_entry = StubRoutines::_atomic_xchg_entry;
2846 StubRoutines::_atomic_cmpxchg_ptr_entry = StubRoutines::_atomic_cmpxchg_entry;
2847 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
2848 StubRoutines::_atomic_add_ptr_entry = StubRoutines::_atomic_add_entry;
2849 #endif // COMPILER2 !=> _LP64
2850 }
2853 void generate_all() {
2854 // Generates all stubs and initializes the entry points
2856 // Generate partial_subtype_check first here since its code depends on
2857 // UseZeroBaseCompressedOops which is defined after heap initialization.
2858 StubRoutines::Sparc::_partial_subtype_check = generate_partial_subtype_check();
2859 // These entry points require SharedInfo::stack0 to be set up in non-core builds
2860 StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
2861 StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
2862 StubRoutines::_throw_ArithmeticException_entry = generate_throw_exception("ArithmeticException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_ArithmeticException), true);
2863 StubRoutines::_throw_NullPointerException_entry = generate_throw_exception("NullPointerException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException), true);
2864 StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
2865 StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
2867 StubRoutines::_handler_for_unsafe_access_entry =
2868 generate_handler_for_unsafe_access();
2870 // support for verify_oop (must happen after universe_init)
2871 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine();
2873 // arraycopy stubs used by compilers
2874 generate_arraycopy_stubs();
2875 }
2878 public:
2879 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
2880 // replace the standard masm with a special one:
2881 _masm = new MacroAssembler(code);
2883 _stub_count = !all ? 0x100 : 0x200;
2884 if (all) {
2885 generate_all();
2886 } else {
2887 generate_initial();
2888 }
2890 // make sure this stub is available for all local calls
2891 if (_atomic_add_stub.is_unbound()) {
2892 // generate a second time, if necessary
2893 (void) generate_atomic_add();
2894 }
2895 }
2898 private:
2899 int _stub_count;
2900 void stub_prolog(StubCodeDesc* cdesc) {
2901 # ifdef ASSERT
2902 // put extra information in the stub code, to make it more readable
2903 #ifdef _LP64
2904 // Write the high part of the address
2905 // [RGV] Check if there is a dependency on the size of this prolog
2906 __ emit_data((intptr_t)cdesc >> 32, relocInfo::none);
2907 #endif
2908 __ emit_data((intptr_t)cdesc, relocInfo::none);
2909 __ emit_data(++_stub_count, relocInfo::none);
2910 # endif
2911 align(true);
2912 }
2914 void align(bool at_header = false) {
2915 // %%%%% move this constant somewhere else
2916 // UltraSPARC cache line size is 8 instructions:
2917 const unsigned int icache_line_size = 32;
2918 const unsigned int icache_half_line_size = 16;
2920 if (at_header) {
2921 while ((intptr_t)(__ pc()) % icache_line_size != 0) {
2922 __ emit_data(0, relocInfo::none);
2923 }
2924 } else {
2925 while ((intptr_t)(__ pc()) % icache_half_line_size != 0) {
2926 __ nop();
2927 }
2928 }
2929 }
2931 }; // end class declaration
2934 address StubGenerator::disjoint_byte_copy_entry = NULL;
2935 address StubGenerator::disjoint_short_copy_entry = NULL;
2936 address StubGenerator::disjoint_int_copy_entry = NULL;
2937 address StubGenerator::disjoint_long_copy_entry = NULL;
2938 address StubGenerator::disjoint_oop_copy_entry = NULL;
2940 address StubGenerator::byte_copy_entry = NULL;
2941 address StubGenerator::short_copy_entry = NULL;
2942 address StubGenerator::int_copy_entry = NULL;
2943 address StubGenerator::long_copy_entry = NULL;
2944 address StubGenerator::oop_copy_entry = NULL;
2946 address StubGenerator::checkcast_copy_entry = NULL;
2948 void StubGenerator_generate(CodeBuffer* code, bool all) {
2949 StubGenerator g(code, all);
2950 }