Wed, 02 Jul 2008 12:55:16 -0700
6719955: Update copyright year
Summary: Update copyright year for files that have been modified in 2008
Reviewed-by: ohair, tbell
1 /*
2 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_stubGenerator_sparc.cpp.incl"
28 // Declaration and definition of StubGenerator (no .hpp file).
29 // For a more detailed description of the stub routine structure
30 // see the comment in stubRoutines.hpp.
32 #define __ _masm->
34 #ifdef PRODUCT
35 #define BLOCK_COMMENT(str) /* nothing */
36 #else
37 #define BLOCK_COMMENT(str) __ block_comment(str)
38 #endif
40 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
42 // Note: The register L7 is used as L7_thread_cache, and may not be used
43 // any other way within this module.
46 static const Register& Lstub_temp = L2;
48 // -------------------------------------------------------------------------------------------------------------------------
49 // Stub Code definitions
51 static address handle_unsafe_access() {
52 JavaThread* thread = JavaThread::current();
53 address pc = thread->saved_exception_pc();
54 address npc = thread->saved_exception_npc();
55 // pc points at the instruction which we must emulate
56 // doing a no-op is fine: return garbage from the load
58 // request an async exception
59 thread->set_pending_unsafe_access_error();
61 // return address of next instruction to execute
62 return npc;
63 }
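// How control reaches handle_unsafe_access is platform code, not this file;
// as a hedged sketch, a Solaris/SPARC-style fault handler would record the
// trapping pc/npc and redirect execution to the stub generated below
// ('unsafe_access_stub' is a hypothetical name for that entry, and the
// JavaThread setters are assumed to mirror the getters used above):
#if 0 // illustrative sketch only
extern "C" void sketch_fault_handler(int sig, siginfo_t* info, void* raw_uc) {
  ucontext_t* uc = (ucontext_t*) raw_uc;
  JavaThread* thread = JavaThread::current();
  // remember the faulting instruction so the stub can skip it
  thread->set_saved_exception_pc ((address) uc->uc_mcontext.gregs[REG_PC]);
  thread->set_saved_exception_npc((address) uc->uc_mcontext.gregs[REG_nPC]);
  // resume in the stub instead of re-executing the faulting load
  uc->uc_mcontext.gregs[REG_PC]  = (greg_t) unsafe_access_stub;
  uc->uc_mcontext.gregs[REG_nPC] = (greg_t) (unsafe_access_stub + 4);
}
#endif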
65 class StubGenerator: public StubCodeGenerator {
66 private:
68 #ifdef PRODUCT
69 #define inc_counter_np(a,b,c) (0)
70 #else
71 void inc_counter_np_(int& counter, Register t1, Register t2) {
72 Address counter_addr(t2, (address) &counter);
73 __ sethi(counter_addr);
74 __ ld(counter_addr, t1);
75 __ inc(t1);
76 __ st(t1, counter_addr);
77 }
78 #define inc_counter_np(counter, t1, t2) \
79 BLOCK_COMMENT("inc_counter " #counter); \
80 inc_counter_np_(counter, t1, t2);
81 #endif
83 //----------------------------------------------------------------------------------------------------
84 // Call stubs are used to call Java from C
86 address generate_call_stub(address& return_pc) {
87 StubCodeMark mark(this, "StubRoutines", "call_stub");
88 address start = __ pc();
90 // Incoming arguments:
91 //
92 // o0 : call wrapper address
93 // o1 : result (address)
94 // o2 : result type
95 // o3 : method
96 // o4 : (interpreter) entry point
97 // o5 : parameters (address)
98 // [sp + 0x5c]: parameter size (in words)
99 // [sp + 0x60]: thread
100 //
101 // +---------------+ <--- sp + 0
102 // | |
103 // . reg save area .
104 // | |
105 // +---------------+ <--- sp + 0x40
106 // | |
107 // . extra 7 slots .
108 // | |
109 // +---------------+ <--- sp + 0x5c
110 // | param. size |
111 // +---------------+ <--- sp + 0x60
112 // | thread |
113 // +---------------+
114 // | |
116 // note: if the link argument position changes, adjust
117 // the code in frame::entry_frame_call_wrapper()
119 const Argument link = Argument(0, false); // used only for GC
120 const Argument result = Argument(1, false);
121 const Argument result_type = Argument(2, false);
122 const Argument method = Argument(3, false);
123 const Argument entry_point = Argument(4, false);
124 const Argument parameters = Argument(5, false);
125 const Argument parameter_size = Argument(6, false);
126 const Argument thread = Argument(7, false);
128 // setup thread register
129 __ ld_ptr(thread.as_address(), G2_thread);
130 __ reinit_heapbase();
132 #ifdef ASSERT
133 // make sure we have no pending exceptions
134 { const Register t = G3_scratch;
135 Label L;
136 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), t);
137 __ br_null(t, false, Assembler::pt, L);
138 __ delayed()->nop();
139 __ stop("StubRoutines::call_stub: entered with pending exception");
140 __ bind(L);
141 }
142 #endif
144 // create activation frame & allocate space for parameters
145 { const Register t = G3_scratch;
146 __ ld_ptr(parameter_size.as_address(), t); // get parameter size (in words)
147 __ add(t, frame::memory_parameter_word_sp_offset, t); // add space for save area (in words)
148 __ round_to(t, WordsPerLong); // make sure it is multiple of 2 (in words)
149 __ sll(t, Interpreter::logStackElementSize(), t); // compute number of bytes
150 __ neg(t); // negate so it can be used with save
151 __ save(SP, t, SP); // setup new frame
152 }
154 // +---------------+ <--- sp + 0
155 // | |
156 // . reg save area .
157 // | |
158 // +---------------+ <--- sp + 0x40
159 // | |
160 // . extra 7 slots .
161 // | |
162 // +---------------+ <--- sp + 0x5c
163 // | empty slot | (only if parameter size is even)
164 // +---------------+
165 // | |
166 // . parameters .
167 // | |
168 // +---------------+ <--- fp + 0
169 // | |
170 // . reg save area .
171 // | |
172 // +---------------+ <--- fp + 0x40
173 // | |
174 // . extra 7 slots .
175 // | |
176 // +---------------+ <--- fp + 0x5c
177 // | param. size |
178 // +---------------+ <--- fp + 0x60
179 // | thread |
180 // +---------------+
181 // | |
183 // pass parameters if any
184 BLOCK_COMMENT("pass parameters if any");
185 { const Register src = parameters.as_in().as_register();
186 const Register dst = Lentry_args;
187 const Register tmp = G3_scratch;
188 const Register cnt = G4_scratch;
190 // test if any parameters & setup of Lentry_args
191 Label exit;
192 __ ld_ptr(parameter_size.as_in().as_address(), cnt); // parameter counter
193 __ add( FP, STACK_BIAS, dst );
194 __ tst(cnt);
195 __ br(Assembler::zero, false, Assembler::pn, exit);
196 __ delayed()->sub(dst, BytesPerWord, dst); // setup Lentry_args
198 // copy parameters if any
199 Label loop;
200 __ BIND(loop);
201 // Store tag first.
202 if (TaggedStackInterpreter) {
203 __ ld_ptr(src, 0, tmp);
204 __ add(src, BytesPerWord, src); // get next
205 __ st_ptr(tmp, dst, Interpreter::tag_offset_in_bytes());
206 }
207 // Store parameter value
208 __ ld_ptr(src, 0, tmp);
209 __ add(src, BytesPerWord, src);
210 __ st_ptr(tmp, dst, Interpreter::value_offset_in_bytes());
211 __ deccc(cnt);
212 __ br(Assembler::greater, false, Assembler::pt, loop);
213 __ delayed()->sub(dst, Interpreter::stackElementSize(), dst);
215 // done
216 __ BIND(exit);
217 }
219 // setup parameters, method & call Java function
220 #ifdef ASSERT
221 // layout_activation_impl checks its notion of saved SP against
222 // this register, so if this changes, update it as well.
223 const Register saved_SP = Lscratch;
224 __ mov(SP, saved_SP); // keep track of SP before call
225 #endif
227 // setup parameters
228 const Register t = G3_scratch;
229 __ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words)
230 __ sll(t, Interpreter::logStackElementSize(), t); // compute number of bytes
231 __ sub(FP, t, Gargs); // setup parameter pointer
232 #ifdef _LP64
233 __ add( Gargs, STACK_BIAS, Gargs ); // Account for LP64 stack bias
234 #endif
235 __ mov(SP, O5_savedSP);
238 // do the call
239 //
240 // the following registers must be set up:
241 //
242 // G2_thread
243 // G5_method
244 // Gargs
245 BLOCK_COMMENT("call Java function");
246 __ jmpl(entry_point.as_in().as_register(), G0, O7);
247 __ delayed()->mov(method.as_in().as_register(), G5_method); // setup method
249 BLOCK_COMMENT("call_stub_return_address:");
250 return_pc = __ pc();
252 // The callee, if it wasn't interpreted, can return with SP changed, so
253 // we can no longer make any assertion about the change of SP.
255 // store result depending on type
256 // (everything that is not T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE
257 // is treated as T_INT)
258 { const Register addr = result .as_in().as_register();
259 const Register type = result_type.as_in().as_register();
260 Label is_long, is_float, is_double, is_object, exit;
261 __ cmp(type, T_OBJECT); __ br(Assembler::equal, false, Assembler::pn, is_object);
262 __ delayed()->cmp(type, T_FLOAT); __ br(Assembler::equal, false, Assembler::pn, is_float);
263 __ delayed()->cmp(type, T_DOUBLE); __ br(Assembler::equal, false, Assembler::pn, is_double);
264 __ delayed()->cmp(type, T_LONG); __ br(Assembler::equal, false, Assembler::pn, is_long);
265 __ delayed()->nop();
267 // store int result
268 __ st(O0, addr, G0);
270 __ BIND(exit);
271 __ ret();
272 __ delayed()->restore();
274 __ BIND(is_object);
275 __ ba(false, exit);
276 __ delayed()->st_ptr(O0, addr, G0);
278 __ BIND(is_float);
279 __ ba(false, exit);
280 __ delayed()->stf(FloatRegisterImpl::S, F0, addr, G0);
282 __ BIND(is_double);
283 __ ba(false, exit);
284 __ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0);
286 __ BIND(is_long);
287 #ifdef _LP64
288 __ ba(false, exit);
289 __ delayed()->st_long(O0, addr, G0); // store entire long
290 #else
291 #if defined(COMPILER2)
292 // All return values are where we want them, except for Longs. C2 returns
293 // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
294 // Since the interpreter will return longs in both G1 and O0/O1 in the 32-bit
295 // build, we simply always use G1.
296 // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
297 // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
298 // first, which would move G1 -> O0/O1 and destroy the exception we were throwing.
300 __ ba(false, exit);
301 __ delayed()->stx(G1, addr, G0); // store entire long
302 #else
303 __ st(O1, addr, BytesPerInt);
304 __ ba(false, exit);
305 __ delayed()->st(O0, addr, G0);
306 #endif /* COMPILER2 */
307 #endif /* _LP64 */
308 }
309 return start;
310 }
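// For reference, C++ code enters this stub through a function pointer whose
// shape mirrors the incoming-argument layout documented above. A sketch of
// that signature (types are approximate; see stubRoutines.hpp for the real
// CallStub typedef used by JavaCalls::call_helper):
#if 0 // illustrative sketch only
typedef void (*CallStubSketch)(
    address   link,                // O0: call wrapper
    intptr_t* result,              // O1: where the stub stores the result
    int       result_type,         // O2: BasicType tag selecting the store
    address   method,              // O3: method to invoke
    address   entry_point,         // O4: (interpreter) entry point
    intptr_t* parameters,          // O5: parameter array
    int       parameter_size,      // [sp + 0x5c], in words
    void*     thread);             // [sp + 0x60]
#endif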
313 //----------------------------------------------------------------------------------------------------
314 // Return point for a Java call if there's an exception thrown in Java code.
315 // The exception is caught and transformed into a pending exception stored in
316 // JavaThread that can be tested from within the VM.
317 //
318 // Oexception: exception oop
320 address generate_catch_exception() {
321 StubCodeMark mark(this, "StubRoutines", "catch_exception");
323 address start = __ pc();
324 // verify that thread corresponds
325 __ verify_thread();
327 const Register& temp_reg = Gtemp;
328 Address pending_exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
329 Address exception_file_offset_addr(G2_thread, 0, in_bytes(Thread::exception_file_offset ()));
330 Address exception_line_offset_addr(G2_thread, 0, in_bytes(Thread::exception_line_offset ()));
332 // set pending exception
333 __ verify_oop(Oexception);
334 __ st_ptr(Oexception, pending_exception_addr);
335 __ set((intptr_t)__FILE__, temp_reg);
336 __ st_ptr(temp_reg, exception_file_offset_addr);
337 __ set((intptr_t)__LINE__, temp_reg);
338 __ st(temp_reg, exception_line_offset_addr);
340 // complete return to VM
341 assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
343 Address stub_ret(temp_reg, StubRoutines::_call_stub_return_address);
344 __ jump_to(stub_ret);
345 __ delayed()->nop();
347 return start;
348 }
351 //----------------------------------------------------------------------------------------------------
352 // Continuation point for runtime calls returning with a pending exception
353 // The pending exception check happened in the runtime or native call stub
354 // The pending exception in Thread is converted into a Java-level exception
355 //
356 // Contract with Java-level exception handler: O0 = exception
357 // O1 = throwing pc
359 address generate_forward_exception() {
360 StubCodeMark mark(this, "StubRoutines", "forward_exception");
361 address start = __ pc();
363 // Upon entry, O7 has the return address returning into Java
364 // (interpreted or compiled) code; i.e. the return address
365 // becomes the throwing pc.
367 const Register& handler_reg = Gtemp;
369 Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
371 #ifdef ASSERT
372 // make sure that this code is only executed if there is a pending exception
373 { Label L;
374 __ ld_ptr(exception_addr, Gtemp);
375 __ br_notnull(Gtemp, false, Assembler::pt, L);
376 __ delayed()->nop();
377 __ stop("StubRoutines::forward exception: no pending exception (1)");
378 __ bind(L);
379 }
380 #endif
382 // compute exception handler into handler_reg
383 __ get_thread();
384 __ ld_ptr(exception_addr, Oexception);
385 __ verify_oop(Oexception);
386 __ save_frame(0); // compensates for compiler weakness
387 __ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC
388 BLOCK_COMMENT("call exception_handler_for_return_address");
389 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), Lscratch);
390 __ mov(O0, handler_reg);
391 __ restore(); // compensates for compiler weakness
393 __ ld_ptr(exception_addr, Oexception);
394 __ add(O7, frame::pc_return_offset, Oissuing_pc); // save the issuing PC
396 #ifdef ASSERT
397 // make sure exception is set
398 { Label L;
399 __ br_notnull(Oexception, false, Assembler::pt, L);
400 __ delayed()->nop();
401 __ stop("StubRoutines::forward exception: no pending exception (2)");
402 __ bind(L);
403 }
404 #endif
405 // jump to exception handler
406 __ jmp(handler_reg, 0);
407 // clear pending exception
408 __ delayed()->st_ptr(G0, exception_addr);
410 return start;
411 }
414 //------------------------------------------------------------------------------------------------------------------------
415 // Continuation point for throwing of implicit exceptions that are not handled in
416 // the current activation. Fabricates an exception oop and initiates normal
417 // exception dispatching in this frame. Only callee-saved registers are preserved
418 // (through the normal register window / RegisterMap handling).
419 // If the compiler needs all registers to be preserved between the fault
420 // point and the exception handler then it must assume responsibility for that in
421 // AbstractCompiler::continuation_for_implicit_null_exception or
422 // continuation_for_implicit_division_by_zero_exception. All other implicit
423 // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
424 // either at call sites or otherwise assume that stack unwinding will be initiated,
425 // so caller saved registers were assumed volatile in the compiler.
427 // Note that we generate only this stub into a RuntimeStub, because it needs to be
428 // properly traversed and ignored during GC, so we change the meaning of the "__"
429 // macro within this method.
430 #undef __
431 #define __ masm->
433 address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc) {
434 #ifdef ASSERT
435 int insts_size = VerifyThread ? 1 * K : 600;
436 #else
437 int insts_size = VerifyThread ? 1 * K : 256;
438 #endif /* ASSERT */
439 int locs_size = 32;
441 CodeBuffer code(name, insts_size, locs_size);
442 MacroAssembler* masm = new MacroAssembler(&code);
444 __ verify_thread();
446 // This is an inlined and slightly modified version of call_VM
447 // which has the ability to fetch the return PC out of thread-local storage
448 __ assert_not_delayed();
450 // Note that we always push a frame because on the SPARC
451 // architecture, for all of our implicit exception kinds at call
452 // sites, the implicit exception is taken before the callee frame
453 // is pushed.
454 __ save_frame(0);
456 int frame_complete = __ offset();
458 if (restore_saved_exception_pc) {
459 Address saved_exception_pc(G2_thread, 0, in_bytes(JavaThread::saved_exception_pc_offset()));
460 __ ld_ptr(saved_exception_pc, I7);
461 __ sub(I7, frame::pc_return_offset, I7);
462 }
464 // Note that we always have a runtime stub frame on the top of stack by this point
465 Register last_java_sp = SP;
466 // 64-bit last_java_sp is biased!
467 __ set_last_Java_frame(last_java_sp, G0);
468 if (VerifyThread) __ mov(G2_thread, O0); // about to be smashed; pass early
469 __ save_thread(noreg);
470 // do the call
471 BLOCK_COMMENT("call runtime_entry");
472 __ call(runtime_entry, relocInfo::runtime_call_type);
473 if (!VerifyThread)
474 __ delayed()->mov(G2_thread, O0); // pass thread as first argument
475 else
476 __ delayed()->nop(); // (thread already passed)
477 __ restore_thread(noreg);
478 __ reset_last_Java_frame();
480 // check for pending exceptions. use Gtemp as scratch register.
481 #ifdef ASSERT
482 Label L;
484 Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
485 Register scratch_reg = Gtemp;
486 __ ld_ptr(exception_addr, scratch_reg);
487 __ br_notnull(scratch_reg, false, Assembler::pt, L);
488 __ delayed()->nop();
489 __ should_not_reach_here();
490 __ bind(L);
491 #endif // ASSERT
492 BLOCK_COMMENT("call forward_exception_entry");
493 __ call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
494 // we use O7 linkage so that forward_exception_entry has the issuing PC
495 __ delayed()->restore();
497 RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, masm->total_frame_size_in_bytes(0), NULL, false);
498 return stub->entry_point();
499 }
501 #undef __
502 #define __ _masm->
505 // Generate a routine that sets all the registers so we
506 // can tell if the stop routine prints them correctly.
507 address generate_test_stop() {
508 StubCodeMark mark(this, "StubRoutines", "test_stop");
509 address start = __ pc();
511 int i;
513 __ save_frame(0);
515 static jfloat zero = 0.0, one = 1.0;
517 // put addr in L0, then load through L0 to F0
518 __ set((intptr_t)&zero, L0); __ ldf( FloatRegisterImpl::S, L0, 0, F0);
519 __ set((intptr_t)&one, L0); __ ldf( FloatRegisterImpl::S, L0, 0, F1); // 1.0 to F1
521 // use add to put 2..18 in F2..F18
522 for ( i = 2; i <= 18; ++i ) {
523 __ fadd( FloatRegisterImpl::S, F1, as_FloatRegister(i-1), as_FloatRegister(i));
524 }
526 // Now put double 2 in F16, double 18 in F18
527 __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F2, F16 );
528 __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F18, F18 );
530 // use add to put 20..32 in F20..F32
531 for (i = 20; i < 32; i += 2) {
532 __ fadd( FloatRegisterImpl::D, F16, as_FloatRegister(i-2), as_FloatRegister(i));
533 }
535 // put 0..7 in i's, 8..15 in l's, 16..23 in o's, 24..31 in g's
536 for ( i = 0; i < 8; ++i ) {
537 if (i < 6) {
538 __ set( i, as_iRegister(i));
539 __ set(16 + i, as_oRegister(i));
540 __ set(24 + i, as_gRegister(i));
541 }
542 __ set( 8 + i, as_lRegister(i));
543 }
545 __ stop("testing stop");
548 __ ret();
549 __ delayed()->restore();
551 return start;
552 }
555 address generate_stop_subroutine() {
556 StubCodeMark mark(this, "StubRoutines", "stop_subroutine");
557 address start = __ pc();
559 __ stop_subroutine();
561 return start;
562 }
564 address generate_flush_callers_register_windows() {
565 StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
566 address start = __ pc();
568 __ flush_windows();
569 __ retl(false);
570 __ delayed()->add( FP, STACK_BIAS, O0 );
571 // The returned value must be a stack pointer whose register save area
572 // is flushed, and will stay flushed while the caller executes.
574 return start;
575 }
577 // Helper functions for v8 atomic operations.
578 //
579 void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) {
580 if (mark_oop_reg == noreg) {
581 address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr();
582 __ set((intptr_t)lock_ptr, lock_ptr_reg);
583 } else {
584 assert(scratch_reg != noreg, "just checking");
585 address lock_ptr = (address)StubRoutines::Sparc::_v8_oop_lock_cache;
586 __ set((intptr_t)lock_ptr, lock_ptr_reg);
587 __ and3(mark_oop_reg, StubRoutines::Sparc::v8_oop_lock_mask_in_place, scratch_reg);
588 __ add(lock_ptr_reg, scratch_reg, lock_ptr_reg);
589 }
590 }
592 void generate_v8_lock_prologue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
594 get_v8_oop_lock_ptr(lock_ptr_reg, mark_oop_reg, scratch_reg);
595 __ set(StubRoutines::Sparc::locked, lock_reg);
596 // Initialize yield counter
597 __ mov(G0,yield_reg);
599 __ BIND(retry);
600 __ cmp(yield_reg, V8AtomicOperationUnderLockSpinCount);
601 __ br(Assembler::less, false, Assembler::pt, dontyield);
602 __ delayed()->nop();
604 // This code can only be called from inside the VM; this
605 // stub is invoked only from Atomic::add(). We do not
606 // want to use call_VM, because _last_java_sp and such
607 // must already be set.
608 //
609 // Save the regs and make space for a C call
610 __ save(SP, -96, SP);
611 __ save_all_globals_into_locals();
612 BLOCK_COMMENT("call os::naked_sleep");
613 __ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
614 __ delayed()->nop();
615 __ restore_globals_from_locals();
616 __ restore();
617 // reset the counter
618 __ mov(G0,yield_reg);
620 __ BIND(dontyield);
622 // try to get lock
623 __ swap(lock_ptr_reg, 0, lock_reg);
625 // did we get the lock?
626 __ cmp(lock_reg, StubRoutines::Sparc::unlocked);
627 __ br(Assembler::notEqual, true, Assembler::pn, retry);
628 __ delayed()->add(yield_reg,1,yield_reg);
630 // yes, got lock. do the operation here.
631 }
633 void generate_v8_lock_epilogue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
634 __ st(lock_reg, lock_ptr_reg, 0); // unlock
635 }
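// The lock selection in get_v8_oop_lock_ptr, as a hedged C++ sketch: either
// the single global atomic-op lock, or an entry of the small lock cache
// indexed by masked bits of the object's mark word (semantics inferred from
// the and3/add sequence above; 'have_mark_oop' models the noreg case):
#if 0 // illustrative sketch only
static address sketch_v8_lock_for(intptr_t mark_word, bool have_mark_oop) {
  if (!have_mark_oop) {
    return (address) StubRoutines::Sparc::atomic_memory_operation_lock_addr();
  }
  // and3 + add: offset into the lock cache by pre-shifted mark bits
  intptr_t offset = mark_word & StubRoutines::Sparc::v8_oop_lock_mask_in_place;
  return (address) StubRoutines::Sparc::_v8_oop_lock_cache + offset;
}
#endif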
637 // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
638 //
639 // Arguments :
640 //
641 // exchange_value: O0
642 // dest: O1
643 //
644 // Results:
645 //
646 // O0: the value previously stored in dest
647 //
648 address generate_atomic_xchg() {
649 StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
650 address start = __ pc();
652 if (UseCASForSwap) {
653 // Use CAS instead of swap, just in case the MP hardware
654 // prefers to work with just one kind of synch. instruction.
655 Label retry;
656 __ BIND(retry);
657 __ mov(O0, O3); // scratch copy of exchange value
658 __ ld(O1, 0, O2); // observe the previous value
659 // try to replace O2 with O3
660 __ cas_under_lock(O1, O2, O3,
661 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
662 __ cmp(O2, O3);
663 __ br(Assembler::notEqual, false, Assembler::pn, retry);
664 __ delayed()->nop();
666 __ retl(false);
667 __ delayed()->mov(O2, O0); // report previous value to caller
669 } else {
670 if (VM_Version::v9_instructions_work()) {
671 __ retl(false);
672 __ delayed()->swap(O1, 0, O0);
673 } else {
674 const Register& lock_reg = O2;
675 const Register& lock_ptr_reg = O3;
676 const Register& yield_reg = O4;
678 Label retry;
679 Label dontyield;
681 generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
682 // got the lock, do the swap
683 __ swap(O1, 0, O0);
685 generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
686 __ retl(false);
687 __ delayed()->nop();
688 }
689 }
691 return start;
692 }
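// The CAS-based path above is the standard exchange loop; its semantics in
// C++ (a sketch only; 'cas32' stands in for the cas/cas_under_lock primitive
// and is not a name from this file):
#if 0 // illustrative sketch only
jint sketch_atomic_xchg(jint exchange_value, volatile jint* dest) {
  jint observed;
  do {
    observed = *dest;                              // ld(O1, 0, O2)
    // try to replace 'observed' with 'exchange_value'; cas32 returns the
    // value found in memory, so a mismatch means another thread raced us
  } while (cas32(dest, observed, exchange_value) != observed);
  return observed;                                 // previous value (O0)
}
#endif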
695 // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
696 //
697 // Arguments :
698 //
699 // exchange_value: O0
700 // dest: O1
701 // compare_value: O2
702 //
703 // Results:
704 //
705 // O0: the value previously stored in dest
706 //
707 // Overwrites (v8): O3,O4,O5
708 //
709 address generate_atomic_cmpxchg() {
710 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
711 address start = __ pc();
713 // cmpxchg(dest, compare_value, exchange_value)
714 __ cas_under_lock(O1, O2, O0,
715 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
716 __ retl(false);
717 __ delayed()->nop();
719 return start;
720 }
722 // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
723 //
724 // Arguments :
725 //
726 // exchange_value: O1:O0
727 // dest: O2
728 // compare_value: O4:O3
729 //
730 // Results:
731 //
732 // O1:O0: the value previously stored in dest
733 //
734 // This only works on V9; on V8 we don't generate any
735 // code and just return NULL.
736 //
737 // Overwrites: G1,G2,G3
738 //
739 address generate_atomic_cmpxchg_long() {
740 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
741 address start = __ pc();
743 if (!VM_Version::supports_cx8())
744 return NULL;
745 __ sllx(O0, 32, O0);
746 __ srl(O1, 0, O1);
747 __ or3(O0,O1,O0); // O0 holds 64-bit value from exchange_value
748 __ sllx(O3, 32, O3);
749 __ srl(O4, 0, O4);
750 __ or3(O3,O4,O3); // O3 holds 64-bit value from compare_value
751 __ casx(O2, O3, O0);
752 __ srl(O0, 0, O1); // unpacked return value in O1:O0
753 __ retl(false);
754 __ delayed()->srlx(O0, 32, O0);
756 return start;
757 }
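// The sllx/srl/or3 sequences above just assemble each 64-bit operand from
// its 32-bit register halves (O0 and O3 hold the most significant words);
// the equivalent packing in C++ (a sketch):
#if 0 // illustrative sketch only
static inline jlong sketch_pack64(juint hi, juint lo) {
  return ((jlong) hi << 32) | (jlong) lo;  // sllx hi, 32; srl lo, 0; or3
}
// After packing, a single casx performs the 8-byte compare-and-swap, and the
// trailing srl/srlx split the old value back into O1:O0 for the caller.
#endif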
760 // Support for jint Atomic::add(jint add_value, volatile jint* dest).
761 //
762 // Arguments :
763 //
764 // add_value: O0 (e.g., +1 or -1)
765 // dest: O1
766 //
767 // Results:
768 //
769 // O0: the new value stored in dest
770 //
771 // Overwrites (v9): O3
772 // Overwrites (v8): O3,O4,O5
773 //
774 address generate_atomic_add() {
775 StubCodeMark mark(this, "StubRoutines", "atomic_add");
776 address start = __ pc();
777 __ BIND(_atomic_add_stub);
779 if (VM_Version::v9_instructions_work()) {
780 Label retry;
781 __ BIND(retry);
783 __ lduw(O1, 0, O2);
784 __ add(O0, O2, O3);
785 __ cas(O1, O2, O3);
786 __ cmp( O2, O3);
787 __ br(Assembler::notEqual, false, Assembler::pn, retry);
788 __ delayed()->nop();
789 __ retl(false);
790 __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
791 } else {
792 const Register& lock_reg = O2;
793 const Register& lock_ptr_reg = O3;
794 const Register& value_reg = O4;
795 const Register& yield_reg = O5;
797 Label retry;
798 Label dontyield;
800 generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
801 // got lock, do the increment
802 __ ld(O1, 0, value_reg);
803 __ add(O0, value_reg, value_reg);
804 __ st(value_reg, O1, 0);
806 // %%% only for RMO and PSO
807 __ membar(Assembler::StoreStore);
809 generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
811 __ retl(false);
812 __ delayed()->mov(value_reg, O0);
813 }
815 return start;
816 }
817 Label _atomic_add_stub; // called from other stubs
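// The v9 path in generate_atomic_add() is fetch-and-add built from CAS; its
// semantics in C++ (a sketch; 'cas32' again stands in for the hardware cas):
#if 0 // illustrative sketch only
jint sketch_atomic_add(jint add_value, volatile jint* dest) {
  jint old, updated;
  do {
    old     = *dest;             // lduw(O1, 0, O2)
    updated = old + add_value;   // add(O0, O2, O3)
  } while (cas32(dest, old, updated) != old);  // cas(O1, O2, O3); cmp; retry
  return updated;                // new value, recomputed in the delay slot
}
#endif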
820 // Support for void OrderAccess::fence().
821 //
822 address generate_fence() {
823 StubCodeMark mark(this, "StubRoutines", "fence");
824 address start = __ pc();
826 __ membar(Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore |
827 Assembler::StoreLoad | Assembler::StoreStore));
828 __ retl(false);
829 __ delayed()->nop();
831 return start;
832 }
835 //------------------------------------------------------------------------------------------------------------------------
836 // The following routine generates a subroutine to throw an asynchronous
837 // UnknownError when an unsafe access gets a fault that could not be
838 // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
839 //
840 // Arguments :
841 //
842 // trapping PC: O7
843 //
844 // Results:
845 // posts an asynchronous exception, skips the trapping instruction
846 //
848 address generate_handler_for_unsafe_access() {
849 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
850 address start = __ pc();
852 const int preserve_register_words = (64 * 2);
853 Address preserve_addr(FP, 0, (-preserve_register_words * wordSize) + STACK_BIAS);
855 Register Lthread = L7_thread_cache;
856 int i;
858 __ save_frame(0);
859 __ mov(G1, L1);
860 __ mov(G2, L2);
861 __ mov(G3, L3);
862 __ mov(G4, L4);
863 __ mov(G5, L5);
864 for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
865 __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
866 }
868 address entry_point = CAST_FROM_FN_PTR(address, handle_unsafe_access);
869 BLOCK_COMMENT("call handle_unsafe_access");
870 __ call(entry_point, relocInfo::runtime_call_type);
871 __ delayed()->nop();
873 __ mov(L1, G1);
874 __ mov(L2, G2);
875 __ mov(L3, G3);
876 __ mov(L4, G4);
877 __ mov(L5, G5);
878 for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
879 __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
880 }
882 __ verify_thread();
884 __ jmp(O0, 0);
885 __ delayed()->restore();
887 return start;
888 }
891 // Support for uint StubRoutines::Sparc::partial_subtype_check( Klass sub, Klass super );
892 // Arguments :
893 //
894 // ret : O0, returned
895 // icc/xcc: set as O0 (depending on wordSize)
896 // sub : O1, argument, not changed
897 // super: O2, argument, not changed
898 // raddr: O7, blown by call
899 address generate_partial_subtype_check() {
900 __ align(CodeEntryAlignment);
901 StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
902 address start = __ pc();
903 Label loop, miss;
905 // Compare super with sub directly, since super is not in its own SSA.
906 // The compiler used to emit this test, but we fold it in here,
907 // to increase overall code density, with no real loss of speed.
908 { Label L;
909 __ cmp(O1, O2);
910 __ brx(Assembler::notEqual, false, Assembler::pt, L);
911 __ delayed()->nop();
912 __ retl();
913 __ delayed()->addcc(G0,0,O0); // set Z flags, zero result
914 __ bind(L);
915 }
917 #if defined(COMPILER2) && !defined(_LP64)
918 // Do not use a 'save' because it blows the 64-bit O registers.
919 __ add(SP,-4*wordSize,SP); // Make space for 4 temps (stack must be 2 words aligned)
920 __ st_ptr(L0,SP,(frame::register_save_words+0)*wordSize);
921 __ st_ptr(L1,SP,(frame::register_save_words+1)*wordSize);
922 __ st_ptr(L2,SP,(frame::register_save_words+2)*wordSize);
923 __ st_ptr(L3,SP,(frame::register_save_words+3)*wordSize);
924 Register Rret = O0;
925 Register Rsub = O1;
926 Register Rsuper = O2;
927 #else
928 __ save_frame(0);
929 Register Rret = I0;
930 Register Rsub = I1;
931 Register Rsuper = I2;
932 #endif
934 Register L0_ary_len = L0;
935 Register L1_ary_ptr = L1;
936 Register L2_super = L2;
937 Register L3_index = L3;
939 #ifdef _LP64
940 Register L4_ooptmp = L4;
942 if (UseCompressedOops) {
943 // this must be under the UseCompressedOops check, as we rely on the fact
944 // that L4 is not clobbered in C2 on 32-bit platforms, where we do an explicit
945 // save on the stack; see several lines above
946 __ encode_heap_oop(Rsuper, L4_ooptmp);
947 }
948 #endif
950 inc_counter_np(SharedRuntime::_partial_subtype_ctr, L0, L1);
952 __ ld_ptr( Rsub, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes(), L3 );
953 __ lduw(L3,arrayOopDesc::length_offset_in_bytes(),L0_ary_len);
954 __ add(L3,arrayOopDesc::base_offset_in_bytes(T_OBJECT),L1_ary_ptr);
955 __ clr(L3_index); // zero index
956 // Load a little early; will load 1 off the end of the array.
957 // Ok for now; revisit if we have other uses of this routine.
958 if (UseCompressedOops) {
959 __ ld(L1_ary_ptr,0,L2_super);// Will load a little early
960 } else {
961 __ ld_ptr(L1_ary_ptr,0,L2_super);// Will load a little early
962 }
964 assert(heapOopSize != 0, "heapOopSize should be initialized");
965 // The scan loop
966 __ BIND(loop);
967 __ add(L1_ary_ptr, heapOopSize, L1_ary_ptr); // Bump by OOP size
968 __ cmp(L3_index,L0_ary_len);
969 __ br(Assembler::equal,false,Assembler::pn,miss);
970 __ delayed()->inc(L3_index); // Bump index
972 if (UseCompressedOops) {
973 #ifdef _LP64
974 __ subcc(L2_super,L4_ooptmp,Rret); // Check for match; zero in Rret for a hit
975 __ br( Assembler::notEqual, false, Assembler::pt, loop );
976 __ delayed()->ld(L1_ary_ptr,0,L2_super);// Will load a little early
977 #else
978 ShouldNotReachHere();
979 #endif
980 } else {
981 __ subcc(L2_super,Rsuper,Rret); // Check for match; zero in Rret for a hit
982 __ brx( Assembler::notEqual, false, Assembler::pt, loop );
983 __ delayed()->ld_ptr(L1_ary_ptr,0,L2_super);// Will load a little early
984 }
986 // Got a hit; report success; set cache. Cache load doesn't
987 // happen here; for speed it is directly emitted by the compiler.
988 __ st_ptr( Rsuper, Rsub, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() );
990 #if defined(COMPILER2) && !defined(_LP64)
991 __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
992 __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
993 __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
994 __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
995 __ retl(); // Result in Rret is zero; flags set to Z
996 __ delayed()->add(SP,4*wordSize,SP);
997 #else
998 __ ret(); // Result in Rret is zero; flags set to Z
999 __ delayed()->restore();
1000 #endif
1002 // Hit or miss falls through here
1003 __ BIND(miss);
1004 __ addcc(G0,1,Rret); // set NZ flags, NZ result
1006 #if defined(COMPILER2) && !defined(_LP64)
1007 __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
1008 __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
1009 __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
1010 __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
1011 __ retl(); // Result in Rret is != 0; flags set to NZ
1012 __ delayed()->add(SP,4*wordSize,SP);
1013 #else
1014 __ ret(); // Result in Rret is != 0; flags set to NZ
1015 __ delayed()->restore();
1016 #endif
1018 return start;
1019 }
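// What the stub above computes, as a hedged C++ sketch (accessor names are
// approximate; the real layout is reached through the secondary_supers and
// secondary_super_cache offsets used above): scan sub's secondary supers
// for super, returning zero and caching the hit on a match.
#if 0 // illustrative sketch only
static uint sketch_partial_subtype_check(Klass* sub, Klass* super) {
  if (sub == super) return 0;                 // the folded quick test
  objArrayOop supers = sub->secondary_supers();
  for (int i = 0; i < supers->length(); i++) {
    if (supers->obj_at(i) == (oop) super) {
      sub->set_secondary_super_cache(super);  // remember the hit
      return 0;                               // hit: zero result, Z flag set
    }
  }
  return 1;                                   // miss: non-zero result, NZ flags
}
#endif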
1022 // Called from MacroAssembler::verify_oop
1023 //
1024 address generate_verify_oop_subroutine() {
1025 StubCodeMark mark(this, "StubRoutines", "verify_oop_stub");
1027 address start = __ pc();
1029 __ verify_oop_subroutine();
1031 return start;
1032 }
1034 static address disjoint_byte_copy_entry;
1035 static address disjoint_short_copy_entry;
1036 static address disjoint_int_copy_entry;
1037 static address disjoint_long_copy_entry;
1038 static address disjoint_oop_copy_entry;
1040 static address byte_copy_entry;
1041 static address short_copy_entry;
1042 static address int_copy_entry;
1043 static address long_copy_entry;
1044 static address oop_copy_entry;
1046 static address checkcast_copy_entry;
1048 //
1049 // Verify that a register contains a clean 32-bit positive value
1050 // (high 32 bits are 0) so it can be used in 64-bit shifts (sllx, srax).
1051 //
1052 // Input:
1053 // Rint - 32-bits value
1054 // Rtmp - scratch
1055 //
1056 void assert_clean_int(Register Rint, Register Rtmp) {
1057 #if defined(ASSERT) && defined(_LP64)
1058 __ signx(Rint, Rtmp);
1059 __ cmp(Rint, Rtmp);
1060 __ breakpoint_trap(Assembler::notEqual, Assembler::xcc);
1061 #endif
1062 }
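// The signx/cmp/trap sequence above amounts to this check (a sketch):
#if 0 // illustrative sketch only
static void sketch_assert_clean_int(int64_t r) {
  // the register must equal its own 32->64-bit sign extension, so shifts
  // like sllx/srax see no stale bits in the upper half
  assert(r == (int64_t)(int32_t) r, "32-bit value is not clean");
}
#endif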
1064 //
1065 // Generate overlap test for array copy stubs
1066 //
1067 // Input:
1068 // O0 - array1
1069 // O1 - array2
1070 // O2 - element count
1071 //
1072 // Kills temps: O3, O4
1073 //
1074 void array_overlap_test(address no_overlap_target, int log2_elem_size) {
1075 assert(no_overlap_target != NULL, "must be generated");
1076 array_overlap_test(no_overlap_target, NULL, log2_elem_size);
1077 }
1078 void array_overlap_test(Label& L_no_overlap, int log2_elem_size) {
1079 array_overlap_test(NULL, &L_no_overlap, log2_elem_size);
1080 }
1081 void array_overlap_test(address no_overlap_target, Label* NOLp, int log2_elem_size) {
1082 const Register from = O0;
1083 const Register to = O1;
1084 const Register count = O2;
1085 const Register to_from = O3; // to - from
1086 const Register byte_count = O4; // count << log2_elem_size
1088 __ subcc(to, from, to_from);
1089 __ sll_ptr(count, log2_elem_size, byte_count);
1090 if (NOLp == NULL)
1091 __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, no_overlap_target);
1092 else
1093 __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp));
1094 __ delayed()->cmp(to_from, byte_count);
1095 if (NOLp == NULL)
1096 __ brx(Assembler::greaterEqual, false, Assembler::pt, no_overlap_target);
1097 else
1098 __ brx(Assembler::greaterEqual, false, Assembler::pt, (*NOLp));
1099 __ delayed()->nop();
1100 }
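// The two branches above encode the following condition; a forward
// (disjoint) copy is safe when it holds (C++ sketch of the pointer math):
#if 0 // illustrative sketch only
static bool sketch_no_overlap(char* from, char* to, size_t count, int log2size) {
  size_t byte_count = count << log2size;           // sll_ptr(count, ..., O4)
  return (uintptr_t) to <= (uintptr_t) from        // dest at or below source
      || (uintptr_t) (to - from) >= byte_count;    // or regions fully disjoint
}
#endif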
1102 //
1103 // Generate pre-write barrier for array.
1104 //
1105 // Input:
1106 // addr - register containing starting address
1107 // count - register containing element count
1109 //
1110 // The input registers are overwritten.
1111 //
1112 void gen_write_ref_array_pre_barrier(Register addr, Register count) {
1113 #if 0 // G1 only
1114 BarrierSet* bs = Universe::heap()->barrier_set();
1115 if (bs->has_write_ref_pre_barrier()) {
1116 assert(bs->has_write_ref_array_pre_opt(),
1117 "Else unsupported barrier set.");
1119 assert(addr->is_global() && count->is_global(),
1120 "If not, then we have to fix this code to handle more "
1121 "general cases.");
1122 // Get some new fresh output registers.
1123 __ save_frame(0);
1124 // Save the necessary global regs... will be used after.
1125 __ mov(addr, L0);
1126 __ mov(count, L1);
1128 __ mov(addr, O0);
1129 // Get the count into O1
1130 __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
1131 __ delayed()->mov(count, O1);
1132 __ mov(L0, addr);
1133 __ mov(L1, count);
1134 __ restore();
1135 }
1136 #endif // 0
1137 }
1138 //
1139 // Generate post-write barrier for array.
1140 //
1141 // Input:
1142 // addr - register containing starting address
1143 // count - register containing element count
1144 // tmp - scratch register
1145 //
1146 // The input registers are overwritten.
1147 //
1148 void gen_write_ref_array_post_barrier(Register addr, Register count,
1149 Register tmp) {
1150 BarrierSet* bs = Universe::heap()->barrier_set();
1152 switch (bs->kind()) {
1153 #if 0 // G1 - only
1154 case BarrierSet::G1SATBCT:
1155 case BarrierSet::G1SATBCTLogging:
1156 {
1157 assert(addr->is_global() && count->is_global(),
1158 "If not, then we have to fix this code to handle more "
1159 "general cases.");
1160 // Get some new fresh output registers.
1161 __ save_frame(0);
1162 __ mov(addr, O0);
1163 __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post));
1164 __ delayed()->mov(count, O1);
1165 __ restore();
1166 }
1167 break;
1168 #endif // 0 G1 - only
1169 case BarrierSet::CardTableModRef:
1170 case BarrierSet::CardTableExtension:
1171 {
1172 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1173 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1174 assert_different_registers(addr, count, tmp);
1176 Label L_loop;
1178 __ sll_ptr(count, LogBytesPerHeapOop, count);
1179 __ sub(count, BytesPerHeapOop, count);
1180 __ add(count, addr, count);
1181 // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
1182 __ srl_ptr(addr, CardTableModRefBS::card_shift, addr);
1183 __ srl_ptr(count, CardTableModRefBS::card_shift, count);
1184 __ sub(count, addr, count);
1185 Address rs(tmp, (address)ct->byte_map_base);
1186 __ load_address(rs);
1187 __ BIND(L_loop);
1188 __ stb(G0, rs.base(), addr);
1189 __ subcc(count, 1, count);
1190 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
1191 __ delayed()->add(addr, 1, addr);
1193 }
1194 break;
1195 case BarrierSet::ModRef:
1196 break;
1197 default :
1198 ShouldNotReachHere();
1200 }
1201 }
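// For the card-table cases, the arithmetic above dirties every card spanned
// by the updated oop range; the same computation in C++ (a sketch, with 0 as
// the dirty value stored by stb(G0, ...)):
#if 0 // illustrative sketch only
static void sketch_card_post_barrier(char* addr, size_t count,
                                     jbyte* byte_map_base) {
  char*  last = addr + count * BytesPerHeapOop - BytesPerHeapOop; // last oop
  jbyte* cur  = byte_map_base + ((uintptr_t) addr >> CardTableModRefBS::card_shift);
  jbyte* end  = byte_map_base + ((uintptr_t) last >> CardTableModRefBS::card_shift);
  while (cur <= end) {
    *cur++ = 0;  // dirty the card
  }
}
#endif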
1204 // Copy big chunks forward with shift
1205 //
1206 // Inputs:
1207 // from - source array address
1208 // to - destination array aligned to 8 bytes
1209 // count - element count to copy, at least the equivalent of 16 bytes
1210 // count_dec - decrement of 'count' equivalent to 16 bytes
1211 // L_copy_bytes - copy exit label
1212 //
1213 void copy_16_bytes_forward_with_shift(Register from, Register to,
1214 Register count, int count_dec, Label& L_copy_bytes) {
1215 Label L_loop, L_aligned_copy, L_copy_last_bytes;
1217 // if both arrays have the same alignment mod 8, do an 8-byte aligned copy
1218 __ andcc(from, 7, G1); // misaligned bytes
1219 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
1220 __ delayed()->nop();
1222 const Register left_shift = G1; // left shift bit counter
1223 const Register right_shift = G5; // right shift bit counter
1225 __ sll(G1, LogBitsPerByte, left_shift);
1226 __ mov(64, right_shift);
1227 __ sub(right_shift, left_shift, right_shift);
1229 //
1230 // Load 2 aligned 8-byte chunks and use one from the previous iteration
1231 // to form 2 aligned 8-byte chunks to store.
1232 //
1233 __ deccc(count, count_dec); // Pre-decrement 'count'
1234 __ andn(from, 7, from); // Align address
1235 __ ldx(from, 0, O3);
1236 __ inc(from, 8);
1237 __ align(16);
1238 __ BIND(L_loop);
1239 __ ldx(from, 0, O4);
1240 __ deccc(count, count_dec); // Can we do next iteration after this one?
1241 __ ldx(from, 8, G4);
1242 __ inc(to, 16);
1243 __ inc(from, 16);
1244 __ sllx(O3, left_shift, O3);
1245 __ srlx(O4, right_shift, G3);
1246 __ bset(G3, O3);
1247 __ stx(O3, to, -16);
1248 __ sllx(O4, left_shift, O4);
1249 __ srlx(G4, right_shift, G3);
1250 __ bset(G3, O4);
1251 __ stx(O4, to, -8);
1252 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
1253 __ delayed()->mov(G4, O3);
1255 __ inccc(count, count_dec>>1 ); // + 8 bytes
1256 __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
1257 __ delayed()->inc(count, count_dec>>1); // restore 'count'
1259 // copy 8 bytes, part of them already loaded in O3
1260 __ ldx(from, 0, O4);
1261 __ inc(to, 8);
1262 __ inc(from, 8);
1263 __ sllx(O3, left_shift, O3);
1264 __ srlx(O4, right_shift, G3);
1265 __ bset(O3, G3);
1266 __ stx(G3, to, -8);
1268 __ BIND(L_copy_last_bytes);
1269 __ srl(right_shift, LogBitsPerByte, right_shift); // misaligned bytes
1270 __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
1271 __ delayed()->sub(from, right_shift, from); // restore address
1273 __ BIND(L_aligned_copy);
1274 }
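// Each loop iteration above merges two aligned 8-byte loads into one
// aligned store; the core bit manipulation, as a sketch (SPARC is
// big-endian, so 'prev' supplies the leading bytes):
#if 0 // illustrative sketch only
static inline uint64_t sketch_merge_forward(uint64_t prev, uint64_t next,
                                            int left_shift, int right_shift) {
  // left_shift = misalignment * 8; right_shift = 64 - left_shift
  return (prev << left_shift) | (next >> right_shift);  // sllx/srlx/bset
}
#endif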
1276 // Copy big chunks backward with shift
1277 //
1278 // Inputs:
1279 // end_from - source array end address
1280 // end_to - destination array end address aligned to 8 bytes
1281 // count - element count to copy, at least the equivalent of 16 bytes
1282 // count_dec - decrement of 'count' equivalent to 16 bytes
1283 // L_aligned_copy - aligned copy exit label
1284 // L_copy_bytes - copy exit label
1285 //
1286 void copy_16_bytes_backward_with_shift(Register end_from, Register end_to,
1287 Register count, int count_dec,
1288 Label& L_aligned_copy, Label& L_copy_bytes) {
1289 Label L_loop, L_copy_last_bytes;
1291 // if both arrays have the same alignment mod 8, do an 8-byte aligned copy
1292 __ andcc(end_from, 7, G1); // misaligned bytes
1293 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
1294 __ delayed()->deccc(count, count_dec); // Pre-decrement 'count'
1296 const Register left_shift = G1; // left shift bit counter
1297 const Register right_shift = G5; // right shift bit counter
1299 __ sll(G1, LogBitsPerByte, left_shift);
1300 __ mov(64, right_shift);
1301 __ sub(right_shift, left_shift, right_shift);
1303 //
1304 // Load 2 aligned 8-byte chunks and use one from the previous iteration
1305 // to form 2 aligned 8-byte chunks to store.
1306 //
1307 __ andn(end_from, 7, end_from); // Align address
1308 __ ldx(end_from, 0, O3);
1309 __ align(16);
1310 __ BIND(L_loop);
1311 __ ldx(end_from, -8, O4);
1312 __ deccc(count, count_dec); // Can we do next iteration after this one?
1313 __ ldx(end_from, -16, G4);
1314 __ dec(end_to, 16);
1315 __ dec(end_from, 16);
1316 __ srlx(O3, right_shift, O3);
1317 __ sllx(O4, left_shift, G3);
1318 __ bset(G3, O3);
1319 __ stx(O3, end_to, 8);
1320 __ srlx(O4, right_shift, O4);
1321 __ sllx(G4, left_shift, G3);
1322 __ bset(G3, O4);
1323 __ stx(O4, end_to, 0);
1324 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
1325 __ delayed()->mov(G4, O3);
1327 __ inccc(count, count_dec>>1 ); // + 8 bytes
1328 __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
1329 __ delayed()->inc(count, count_dec>>1); // restore 'count'
1331 // copy 8 bytes, part of them already loaded in O3
1332 __ ldx(end_from, -8, O4);
1333 __ dec(end_to, 8);
1334 __ dec(end_from, 8);
1335 __ srlx(O3, right_shift, O3);
1336 __ sllx(O4, left_shift, G3);
1337 __ bset(O3, G3);
1338 __ stx(G3, end_to, 0);
1340 __ BIND(L_copy_last_bytes);
1341 __ srl(left_shift, LogBitsPerByte, left_shift); // misaligned bytes
1342 __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
1343 __ delayed()->add(end_from, left_shift, end_from); // restore address
1344 }
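// The backward variant mirrors the forward one with the shift roles
// swapped, since stores walk down through memory (a sketch):
#if 0 // illustrative sketch only
static inline uint64_t sketch_merge_backward(uint64_t prev, uint64_t next,
                                             int left_shift, int right_shift) {
  return (prev >> right_shift) | (next << left_shift);  // srlx/sllx/bset
}
#endif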
1346 //
1347 // Generate stub for disjoint byte copy. If "aligned" is true, the
1348 // "from" and "to" addresses are assumed to be heapword aligned.
1349 //
1350 // Arguments for generated stub:
1351 // from: O0
1352 // to: O1
1353 // count: O2 treated as signed
1354 //
1355 address generate_disjoint_byte_copy(bool aligned, const char * name) {
1356 __ align(CodeEntryAlignment);
1357 StubCodeMark mark(this, "StubRoutines", name);
1358 address start = __ pc();
1360 Label L_skip_alignment, L_align;
1361 Label L_copy_byte, L_copy_byte_loop, L_exit;
1363 const Register from = O0; // source array address
1364 const Register to = O1; // destination array address
1365 const Register count = O2; // elements count
1366 const Register offset = O5; // offset from start of arrays
1367 // O3, O4, G3, G4 are used as temp registers
1369 assert_clean_int(count, O3); // Make sure 'count' is clean int.
1371 if (!aligned) disjoint_byte_copy_entry = __ pc();
1372 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1373 if (!aligned) BLOCK_COMMENT("Entry:");
1375 // for short arrays, just do single element copy
1376 __ cmp(count, 23); // 16 + 7
1377 __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
1378 __ delayed()->mov(G0, offset);
1380 if (aligned) {
1381 // 'aligned' == true when it is known statically during compilation
1382 // of this arraycopy call site that both 'from' and 'to' addresses
1383 // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1384 //
1385 // Aligned arrays have 4-byte alignment in the 32-bit VM
1386 // and 8-byte alignment in the 64-bit VM, so we do this only in the 32-bit VM.
1387 //
1388 #ifndef _LP64
1389 // copy a 4-byte word if necessary to align 'to' to 8 bytes
1390 __ andcc(to, 7, G0);
1391 __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment);
1392 __ delayed()->ld(from, 0, O3);
1393 __ inc(from, 4);
1394 __ inc(to, 4);
1395 __ dec(count, 4);
1396 __ st(O3, to, -4);
1397 __ BIND(L_skip_alignment);
1398 #endif
1399 } else {
1400 // copy bytes to align 'to' on an 8-byte boundary
1401 __ andcc(to, 7, G1); // misaligned bytes
1402 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1403 __ delayed()->neg(G1);
1404 __ inc(G1, 8); // bytes needed to reach the next 8-byte alignment
1405 __ sub(count, G1, count);
1406 __ BIND(L_align);
1407 __ ldub(from, 0, O3);
1408 __ deccc(G1);
1409 __ inc(from);
1410 __ stb(O3, to, 0);
1411 __ br(Assembler::notZero, false, Assembler::pt, L_align);
1412 __ delayed()->inc(to);
1413 __ BIND(L_skip_alignment);
1414 }
1415 #ifdef _LP64
1416 if (!aligned)
1417 #endif
1418 {
1419 // Copy with shift 16 bytes per iteration if arrays do not have
1420 // the same alignment mod 8, otherwise fall through to the next
1421 // code for aligned copy.
1422 // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
1423 // Also jump over aligned copy after the copy with shift completed.
1425 copy_16_bytes_forward_with_shift(from, to, count, 16, L_copy_byte);
1426 }
1428 // Both arrays are 8-byte aligned; copy 16 bytes at a time
1429 __ and3(count, 7, G4); // Save count
1430 __ srl(count, 3, count);
1431 generate_disjoint_long_copy_core(aligned);
1432 __ mov(G4, count); // Restore count
1434 // copy trailing bytes
1435 __ BIND(L_copy_byte);
1436 __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
1437 __ delayed()->nop();
1438 __ align(16);
1439 __ BIND(L_copy_byte_loop);
1440 __ ldub(from, offset, O3);
1441 __ deccc(count);
1442 __ stb(O3, to, offset);
1443 __ brx(Assembler::notZero, false, Assembler::pt, L_copy_byte_loop);
1444 __ delayed()->inc(offset);
1446 __ BIND(L_exit);
1447 // O3, O4 are used as temp registers
1448 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
1449 __ retl();
1450 __ delayed()->mov(G0, O0); // return 0
1451 return start;
1452 }
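// The unaligned prologue above (neg + inc 8) computes how many single bytes
// must be copied before 'to' reaches an 8-byte boundary; in C++ terms
// (a sketch):
#if 0 // illustrative sketch only
static inline size_t sketch_bytes_to_align8(char* to) {
  size_t mis = (uintptr_t) to & 7;   // andcc(to, 7, G1)
  return mis == 0 ? 0 : 8 - mis;     // delayed()->neg(G1); inc(G1, 8)
}
#endif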
1454 //
1455 // Generate stub for conjoint byte copy. If "aligned" is true, the
1456 // "from" and "to" addresses are assumed to be heapword aligned.
1457 //
1458 // Arguments for generated stub:
1459 // from: O0
1460 // to: O1
1461 // count: O2 treated as signed
1462 //
1463 address generate_conjoint_byte_copy(bool aligned, const char * name) {
1464 // Do reverse copy.
1466 __ align(CodeEntryAlignment);
1467 StubCodeMark mark(this, "StubRoutines", name);
1468 address start = __ pc();
1469 address nooverlap_target = aligned ?
1470 StubRoutines::arrayof_jbyte_disjoint_arraycopy() :
1471 disjoint_byte_copy_entry;
1473 Label L_skip_alignment, L_align, L_aligned_copy;
1474 Label L_copy_byte, L_copy_byte_loop, L_exit;
1476 const Register from = O0; // source array address
1477 const Register to = O1; // destination array address
1478 const Register count = O2; // elements count
1479 const Register end_from = from; // source array end address
1480 const Register end_to = to; // destination array end address
1482 assert_clean_int(count, O3); // Make sure 'count' is clean int.
1484 if (!aligned) byte_copy_entry = __ pc();
1485 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1486 if (!aligned) BLOCK_COMMENT("Entry:");
1488 array_overlap_test(nooverlap_target, 0);
1490 __ add(to, count, end_to); // offset after last copied element
1492 // for short arrays, just do single element copy
1493 __ cmp(count, 23); // 16 + 7
1494 __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
1495 __ delayed()->add(from, count, end_from);
1497 {
1498 // Align the ends of the arrays, since they may be unaligned even
1499 // when the arrays themselves are aligned.
1501 // copy bytes to align 'end_to' on an 8-byte boundary
1502 __ andcc(end_to, 7, G1); // misaligned bytes
1503 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1504 __ delayed()->nop();
1505 __ sub(count, G1, count);
1506 __ BIND(L_align);
1507 __ dec(end_from);
1508 __ dec(end_to);
1509 __ ldub(end_from, 0, O3);
1510 __ deccc(G1);
1511 __ brx(Assembler::notZero, false, Assembler::pt, L_align);
1512 __ delayed()->stb(O3, end_to, 0);
1513 __ BIND(L_skip_alignment);
1514 }
1515 #ifdef _LP64
1516 if (aligned) {
1517 // Both arrays are aligned to 8 bytes in the 64-bit VM.
1518 // The 'count' is decremented in copy_16_bytes_backward_with_shift()
1519 // in unaligned case.
1520 __ dec(count, 16);
1521 } else
1522 #endif
1523 {
1524 // Copy with shift 16 bytes per iteration if arrays do not have
1525 // the same alignment mod 8, otherwise jump to the next
1526 // code for aligned copy (subtracting 16 from 'count' before the jump).
1527 // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
1528 // Also jump over aligned copy after the copy with shift completed.
1530 copy_16_bytes_backward_with_shift(end_from, end_to, count, 16,
1531 L_aligned_copy, L_copy_byte);
1532 }
1533 // copy 16 elements (16 bytes) at a time
1534 __ align(16);
1535 __ BIND(L_aligned_copy);
1536 __ dec(end_from, 16);
1537 __ ldx(end_from, 8, O3);
1538 __ ldx(end_from, 0, O4);
1539 __ dec(end_to, 16);
1540 __ deccc(count, 16);
1541 __ stx(O3, end_to, 8);
1542 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
1543 __ delayed()->stx(O4, end_to, 0);
1544 __ inc(count, 16);
1546 // copy 1 element (1 byte) at a time
1547 __ BIND(L_copy_byte);
1548 __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
1549 __ delayed()->nop();
1550 __ align(16);
1551 __ BIND(L_copy_byte_loop);
1552 __ dec(end_from);
1553 __ dec(end_to);
1554 __ ldub(end_from, 0, O4);
1555 __ deccc(count);
1556 __ brx(Assembler::greater, false, Assembler::pt, L_copy_byte_loop);
1557 __ delayed()->stb(O4, end_to, 0);
1559 __ BIND(L_exit);
1560 // O3, O4 are used as temp registers
1561 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
1562 __ retl();
1563 __ delayed()->mov(G0, O0); // return 0
1564 return start;
1565 }
1567 //
1568 // Generate stub for disjoint short copy. If "aligned" is true, the
1569 // "from" and "to" addresses are assumed to be heapword aligned.
1570 //
1571 // Arguments for generated stub:
1572 // from: O0
1573 // to: O1
1574 // count: O2 treated as signed
1575 //
1576 address generate_disjoint_short_copy(bool aligned, const char * name) {
1577 __ align(CodeEntryAlignment);
1578 StubCodeMark mark(this, "StubRoutines", name);
1579 address start = __ pc();
1581 Label L_skip_alignment, L_skip_alignment2;
1582 Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
1584 const Register from = O0; // source array address
1585 const Register to = O1; // destination array address
1586 const Register count = O2; // elements count
1587 const Register offset = O5; // offset from start of arrays
1588 // O3, O4, G3, G4 are used as temp registers
1590 assert_clean_int(count, O3); // Make sure 'count' is clean int.
1592 if (!aligned) disjoint_short_copy_entry = __ pc();
1593 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1594 if (!aligned) BLOCK_COMMENT("Entry:");
1596 // for short arrays, just do single element copy
1597 __ cmp(count, 11); // 8 + 3 (22 bytes)
1598 __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
1599 __ delayed()->mov(G0, offset);
1601 if (aligned) {
1602 // 'aligned' == true when it is known statically during compilation
1603 // of this arraycopy call site that both 'from' and 'to' addresses
1604 // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1605 //
1606 // Aligned arrays have 4-byte alignment in the 32-bit VM
1607 // and 8-byte alignment in the 64-bit VM.
1608 //
1609 #ifndef _LP64
1610 // copy a 2-element word if necessary to align 'to' to 8 bytes
1611 __ andcc(to, 7, G0);
1612 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1613 __ delayed()->ld(from, 0, O3);
1614 __ inc(from, 4);
1615 __ inc(to, 4);
1616 __ dec(count, 2);
1617 __ st(O3, to, -4);
1618 __ BIND(L_skip_alignment);
1619 #endif
1620 } else {
1621 // copy 1 element if necessary to align 'to' on a 4-byte boundary
1622 __ andcc(to, 3, G0);
1623 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1624 __ delayed()->lduh(from, 0, O3);
1625 __ inc(from, 2);
1626 __ inc(to, 2);
1627 __ dec(count);
1628 __ sth(O3, to, -2);
1629 __ BIND(L_skip_alignment);
1631 // copy 2 elements to align 'to' on an 8-byte boundary
1632 __ andcc(to, 7, G0);
1633 __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
1634 __ delayed()->lduh(from, 0, O3);
1635 __ dec(count, 2);
1636 __ lduh(from, 2, O4);
1637 __ inc(from, 4);
1638 __ inc(to, 4);
1639 __ sth(O3, to, -4);
1640 __ sth(O4, to, -2);
1641 __ BIND(L_skip_alignment2);
1642 }
1643 #ifdef _LP64
1644 if (!aligned)
1645 #endif
1646 {
1647 // Copy with shift 16 bytes per iteration if arrays do not have
1648 // the same alignment mod 8, otherwise fall through to the next
1649 // code for aligned copy.
1650 // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
1651 // Also jump over aligned copy after the copy with shift completed.
1653 copy_16_bytes_forward_with_shift(from, to, count, 8, L_copy_2_bytes);
1654 }
1656 // Both arrays are 8-byte aligned; copy 16 bytes at a time
1657 __ and3(count, 3, G4); // Save
1658 __ srl(count, 2, count);
1659 generate_disjoint_long_copy_core(aligned);
1660 __ mov(G4, count); // restore
1662 // copy 1 element at a time
1663 __ BIND(L_copy_2_bytes);
1664 __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
1665 __ delayed()->nop();
1666 __ align(16);
1667 __ BIND(L_copy_2_bytes_loop);
1668 __ lduh(from, offset, O3);
1669 __ deccc(count);
1670 __ sth(O3, to, offset);
1671 __ brx(Assembler::notZero, false, Assembler::pt, L_copy_2_bytes_loop);
1672 __ delayed()->inc(offset, 2);
1674 __ BIND(L_exit);
1675 // O3, O4 are used as temp registers
1676 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
1677 __ retl();
1678 __ delayed()->mov(G0, O0); // return 0
1679 return start;
1680 }
1682 //
1683 // Generate stub for conjoint short copy. If "aligned" is true, the
1684 // "from" and "to" addresses are assumed to be heapword aligned.
1685 //
1686 // Arguments for generated stub:
1687 // from: O0
1688 // to: O1
1689 // count: O2 treated as signed
1690 //
1691 address generate_conjoint_short_copy(bool aligned, const char * name) {
1692 // Do reverse copy.
1694 __ align(CodeEntryAlignment);
1695 StubCodeMark mark(this, "StubRoutines", name);
1696 address start = __ pc();
1697 address nooverlap_target = aligned ?
1698 StubRoutines::arrayof_jshort_disjoint_arraycopy() :
1699 disjoint_short_copy_entry;
1701 Label L_skip_alignment, L_skip_alignment2, L_aligned_copy;
1702 Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
1704 const Register from = O0; // source array address
1705 const Register to = O1; // destination array address
1706 const Register count = O2; // elements count
1707 const Register end_from = from; // source array end address
1708 const Register end_to = to; // destination array end address
1710 const Register byte_count = O3; // bytes count to copy
1712 assert_clean_int(count, O3); // Make sure 'count' is clean int.
1714 if (!aligned) short_copy_entry = __ pc();
1715 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1716 if (!aligned) BLOCK_COMMENT("Entry:");
1718 array_overlap_test(nooverlap_target, 1);
1720 __ sllx(count, LogBytesPerShort, byte_count);
1721 __ add(to, byte_count, end_to); // offset after last copied element
1723 // for short arrays, just do single element copy
1724 __ cmp(count, 11); // 8 + 3 (22 bytes)
1725 __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
1726 __ delayed()->add(from, byte_count, end_from);
1728 {
1729 // Align the ends of the arrays, since they may be unaligned
1730 // even when the arrays themselves are aligned.
1732 // copy 1 element if necessary to align 'end_to' on a 4-byte boundary
1733 __ andcc(end_to, 3, G0);
1734 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1735 __ delayed()->lduh(end_from, -2, O3);
1736 __ dec(end_from, 2);
1737 __ dec(end_to, 2);
1738 __ dec(count);
1739 __ sth(O3, end_to, 0);
1740 __ BIND(L_skip_alignment);
1742 // copy 2 elements to align 'end_to' on an 8-byte boundary
1743 __ andcc(end_to, 7, G0);
1744 __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
1745 __ delayed()->lduh(end_from, -2, O3);
1746 __ dec(count, 2);
1747 __ lduh(end_from, -4, O4);
1748 __ dec(end_from, 4);
1749 __ dec(end_to, 4);
1750 __ sth(O3, end_to, 2);
1751 __ sth(O4, end_to, 0);
1752 __ BIND(L_skip_alignment2);
1753 }
1754 #ifdef _LP64
1755 if (aligned) {
1756 // Both arrays are aligned to 8-bytes in 64-bits VM.
1757 // The 'count' is decremented in copy_16_bytes_backward_with_shift()
1758 // in unaligned case.
1759 __ dec(count, 8);
1760 } else
1761 #endif
1762 {
1763 // Copy with shift 16 bytes per iteration if arrays do not have
1764 // the same alignment mod 8, otherwise jump to the next
1765 // code for aligned copy (subtracting 8 from 'count' before the jump).
1766 // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
1767 // Also jump over the aligned copy after the copy with shift completes.
1769 copy_16_bytes_backward_with_shift(end_from, end_to, count, 8,
1770 L_aligned_copy, L_copy_2_bytes);
1771 }
1772 // copy 4 elements (16 bytes) at a time
1773 __ align(16);
1774 __ BIND(L_aligned_copy);
1775 __ dec(end_from, 16);
1776 __ ldx(end_from, 8, O3);
1777 __ ldx(end_from, 0, O4);
1778 __ dec(end_to, 16);
1779 __ deccc(count, 8);
1780 __ stx(O3, end_to, 8);
1781 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
1782 __ delayed()->stx(O4, end_to, 0);
1783 __ inc(count, 8);
1785 // copy 1 element (2 bytes) at a time
1786 __ BIND(L_copy_2_bytes);
1787 __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
1788 __ delayed()->nop();
1789 __ BIND(L_copy_2_bytes_loop);
1790 __ dec(end_from, 2);
1791 __ dec(end_to, 2);
1792 __ lduh(end_from, 0, O4);
1793 __ deccc(count);
1794 __ brx(Assembler::greater, false, Assembler::pt, L_copy_2_bytes_loop);
1795 __ delayed()->sth(O4, end_to, 0);
1797 __ BIND(L_exit);
1798 // O3, O4 are used as temp registers
1799 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
1800 __ retl();
1801 __ delayed()->mov(G0, O0); // return 0
1802 return start;
1803 }
1805 //
1806 // Generate core code for disjoint int copy (and oop copy on 32-bit).
1807 // If "aligned" is true, the "from" and "to" addresses are assumed
1808 // to be heapword aligned.
1809 //
1810 // Arguments:
1811 // from: O0
1812 // to: O1
1813 // count: O2 treated as signed
1814 //
1815 void generate_disjoint_int_copy_core(bool aligned) {
1817 Label L_skip_alignment, L_aligned_copy;
1818 Label L_copy_16_bytes, L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
1820 const Register from = O0; // source array address
1821 const Register to = O1; // destination array address
1822 const Register count = O2; // elements count
1823 const Register offset = O5; // offset from start of arrays
1824 // O3, O4, G3, G4 are used as temp registers
1826 // 'aligned' == true when it is known statically during compilation
1827 // of this arraycopy call site that both 'from' and 'to' addresses
1828 // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1829 //
1830 // Aligned arrays have 4-byte alignment in the 32-bit VM
1831 // and 8-byte alignment in the 64-bit VM.
1832 //
1833 #ifdef _LP64
1834 if (!aligned)
1835 #endif
1836 {
1837 // The next check could be put under 'ifndef' since the code in
1838 // generate_disjoint_long_copy_core() has its own checks and sets 'offset'.
1840 // for short arrays, just do single element copy
1841 __ cmp(count, 5); // 4 + 1 (20 bytes)
1842 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
1843 __ delayed()->mov(G0, offset);
1845 // copy 1 element to align 'to' on an 8-byte boundary
1846 __ andcc(to, 7, G0);
1847 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1848 __ delayed()->ld(from, 0, O3);
1849 __ inc(from, 4);
1850 __ inc(to, 4);
1851 __ dec(count);
1852 __ st(O3, to, -4);
1853 __ BIND(L_skip_alignment);
1855 // if arrays have the same alignment mod 8, do a 4-element copy
1856 __ andcc(from, 7, G0);
1857 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
1858 __ delayed()->ld(from, 0, O3);
1860 //
1861 // Load 2 aligned 8-byte chunks and use one from the previous iteration
1862 // to form 2 aligned 8-byte chunks to store.
1863 //
1864 // copy_16_bytes_forward_with_shift() is not used here since this
1865 // code is more efficient.
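//
// A rough C-level sketch of the merge loop below (illustrative only;
// 'load64'/'store64' stand in for ldx/stx, shifts are logical, and
// big-endian word order is assumed, as on SPARC):
//   prev = *(jint*)from;                  // loaded in the delay slot above
//   do {
//     mid  = load64(from + 4);            // aligned 8-byte loads
//     next = load64(from + 12);
//     store64(to,     ((julong)prev << 32) | (mid >> 32));
//     store64(to + 8, (mid << 32) | (next >> 32));
//     prev = (jint)next;                  // low word carries over
//     from += 16; to += 16; count -= 4;
//   } while (count >= 4);                 // roughly; 'count' is biased by 4
//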
1867 // copy with shift 4 elements (16 bytes) at a time
1868 __ dec(count, 4); // The cmp at the beginning guarantees count >= 4
1870 __ align(16);
1871 __ BIND(L_copy_16_bytes);
1872 __ ldx(from, 4, O4);
1873 __ deccc(count, 4); // Can we do next iteration after this one?
1874 __ ldx(from, 12, G4);
1875 __ inc(to, 16);
1876 __ inc(from, 16);
1877 __ sllx(O3, 32, O3);
1878 __ srlx(O4, 32, G3);
1879 __ bset(G3, O3);
1880 __ stx(O3, to, -16);
1881 __ sllx(O4, 32, O4);
1882 __ srlx(G4, 32, G3);
1883 __ bset(G3, O4);
1884 __ stx(O4, to, -8);
1885 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
1886 __ delayed()->mov(G4, O3);
1888 __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
1889 __ delayed()->inc(count, 4); // restore 'count'
1891 __ BIND(L_aligned_copy);
1892 }
1893 // copy 4 elements (16 bytes) at a time
1894 __ and3(count, 1, G4); // Save
1895 __ srl(count, 1, count);
1896 generate_disjoint_long_copy_core(aligned);
1897 __ mov(G4, count); // Restore
1899 // copy 1 element at a time
1900 __ BIND(L_copy_4_bytes);
1901 __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
1902 __ delayed()->nop();
1903 __ BIND(L_copy_4_bytes_loop);
1904 __ ld(from, offset, O3);
1905 __ deccc(count);
1906 __ st(O3, to, offset);
1907 __ brx(Assembler::notZero, false, Assembler::pt, L_copy_4_bytes_loop);
1908 __ delayed()->inc(offset, 4);
1909 __ BIND(L_exit);
1910 }
1912 //
1913 // Generate stub for disjoint int copy. If "aligned" is true, the
1914 // "from" and "to" addresses are assumed to be heapword aligned.
1915 //
1916 // Arguments for generated stub:
1917 // from: O0
1918 // to: O1
1919 // count: O2 treated as signed
1920 //
1921 address generate_disjoint_int_copy(bool aligned, const char * name) {
1922 __ align(CodeEntryAlignment);
1923 StubCodeMark mark(this, "StubRoutines", name);
1924 address start = __ pc();
1926 const Register count = O2;
1927 assert_clean_int(count, O3); // Make sure 'count' is clean int.
1929 if (!aligned) disjoint_int_copy_entry = __ pc();
1930 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1931 if (!aligned) BLOCK_COMMENT("Entry:");
1933 generate_disjoint_int_copy_core(aligned);
1935 // O3, O4 are used as temp registers
1936 inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
1937 __ retl();
1938 __ delayed()->mov(G0, O0); // return 0
1939 return start;
1940 }
1942 //
1943 // Generate core code for conjoint int copy (and oop copy on 32-bit).
1944 // If "aligned" is true, the "from" and "to" addresses are assumed
1945 // to be heapword aligned.
1946 //
1947 // Arguments:
1948 // from: O0
1949 // to: O1
1950 // count: O2 treated as signed
1951 //
1952 void generate_conjoint_int_copy_core(bool aligned) {
1953 // Do reverse copy.
1955 Label L_skip_alignment, L_aligned_copy;
1956 Label L_copy_16_bytes, L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
1958 const Register from = O0; // source array address
1959 const Register to = O1; // destination array address
1960 const Register count = O2; // elements count
1961 const Register end_from = from; // source array end address
1962 const Register end_to = to; // destination array end address
1963 // O3, O4, O5, G3 are used as temp registers
1965 const Register byte_count = O3; // bytes count to copy
1967 __ sllx(count, LogBytesPerInt, byte_count);
1968 __ add(to, byte_count, end_to); // offset after last copied element
1970 __ cmp(count, 5); // for short arrays, just do single element copy
1971 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
1972 __ delayed()->add(from, byte_count, end_from);
1974 // copy 1 element to align 'end_to' on an 8-byte boundary
1975 __ andcc(end_to, 7, G0);
1976 __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1977 __ delayed()->nop();
1978 __ dec(count);
1979 __ dec(end_from, 4);
1980 __ dec(end_to, 4);
1981 __ ld(end_from, 0, O4);
1982 __ st(O4, end_to, 0);
1983 __ BIND(L_skip_alignment);
1985 // Check if 'end_from' and 'end_to' have the same alignment.
1986 __ andcc(end_from, 7, G0);
1987 __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
1988 __ delayed()->dec(count, 4); // The cmp at the start guarantees count >= 4
1990 // copy with shift 4 elements (16 bytes) at a time
1991 //
1992 // Load 2 aligned 8-byte chunks and use one from the previous iteration
1993 // to form 2 aligned 8-byte chunks to store.
1994 //
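// Roughly, in C-like form (illustrative only; 'load64'/'store64' stand in
// for ldx/stx, shifts are logical, big-endian word order as on SPARC):
//   prev = load64(end_from - 4);          // 8-byte aligned load
//   do {
//     mid  = load64(end_from - 12);
//     next = load64(end_from - 20);
//     store64(end_to -  8, (mid  << 32) | (prev >> 32));
//     store64(end_to - 16, (next << 32) | (mid  >> 32));
//     prev = next;
//     end_from -= 16; end_to -= 16; count -= 4;
//   } while (count >= 4);                 // roughly; 'count' is biased by 4
//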
1995 __ ldx(end_from, -4, O3);
1996 __ align(16);
1997 __ BIND(L_copy_16_bytes);
1998 __ ldx(end_from, -12, O4);
1999 __ deccc(count, 4);
2000 __ ldx(end_from, -20, O5);
2001 __ dec(end_to, 16);
2002 __ dec(end_from, 16);
2003 __ srlx(O3, 32, O3);
2004 __ sllx(O4, 32, G3);
2005 __ bset(G3, O3);
2006 __ stx(O3, end_to, 8);
2007 __ srlx(O4, 32, O4);
2008 __ sllx(O5, 32, G3);
2009 __ bset(O4, G3);
2010 __ stx(G3, end_to, 0);
2011 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
2012 __ delayed()->mov(O5, O3);
2014 __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
2015 __ delayed()->inc(count, 4);
2017 // copy 4 elements (16 bytes) at a time
2018 __ align(16);
2019 __ BIND(L_aligned_copy);
2020 __ dec(end_from, 16);
2021 __ ldx(end_from, 8, O3);
2022 __ ldx(end_from, 0, O4);
2023 __ dec(end_to, 16);
2024 __ deccc(count, 4);
2025 __ stx(O3, end_to, 8);
2026 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
2027 __ delayed()->stx(O4, end_to, 0);
2028 __ inc(count, 4);
2030 // copy 1 element (4 bytes) at a time
2031 __ BIND(L_copy_4_bytes);
2032 __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
2033 __ delayed()->nop();
2034 __ BIND(L_copy_4_bytes_loop);
2035 __ dec(end_from, 4);
2036 __ dec(end_to, 4);
2037 __ ld(end_from, 0, O4);
2038 __ deccc(count);
2039 __ brx(Assembler::greater, false, Assembler::pt, L_copy_4_bytes_loop);
2040 __ delayed()->st(O4, end_to, 0);
2041 __ BIND(L_exit);
2042 }
2044 //
2045 // Generate stub for conjoint int copy. If "aligned" is true, the
2046 // "from" and "to" addresses are assumed to be heapword aligned.
2047 //
2048 // Arguments for generated stub:
2049 // from: O0
2050 // to: O1
2051 // count: O2 treated as signed
2052 //
2053 address generate_conjoint_int_copy(bool aligned, const char * name) {
2054 __ align(CodeEntryAlignment);
2055 StubCodeMark mark(this, "StubRoutines", name);
2056 address start = __ pc();
2058 address nooverlap_target = aligned ?
2059 StubRoutines::arrayof_jint_disjoint_arraycopy() :
2060 disjoint_int_copy_entry;
2062 assert_clean_int(O2, O3); // Make sure 'count' is clean int.
2064 if (!aligned) int_copy_entry = __ pc();
2065 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2066 if (!aligned) BLOCK_COMMENT("Entry:");
2068 array_overlap_test(nooverlap_target, 2);
2070 generate_conjoint_int_copy_core(aligned);
2072 // O3, O4 are used as temp registers
2073 inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
2074 __ retl();
2075 __ delayed()->mov(G0, O0); // return 0
2076 return start;
2077 }
2079 //
2080 // Generate core code for disjoint long copy (and oop copy on 64-bit).
2081 // "aligned" is ignored, because we must make the stronger
2082 // assumption that both addresses are always 64-bit aligned.
2083 //
2084 // Arguments:
2085 // from: O0
2086 // to: O1
2087 // count: O2 treated as signed
2088 //
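// Illustrative C equivalent of the generated loop (a sketch, not the
// emitted code):
//   long i = 0;
//   for (count -= 2; count >= 0; count -= 2, i += 16) {
//     *(jlong*)(to + i)     = *(jlong*)(from + i);
//     *(jlong*)(to + i + 8) = *(jlong*)(from + i + 8);
//   }
//   if (count + 2 != 0)                   // odd count: one element left
//     *(jlong*)(to + i) = *(jlong*)(from + i);
//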
2089 void generate_disjoint_long_copy_core(bool aligned) {
2090 Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
2091 const Register from = O0; // source array address
2092 const Register to = O1; // destination array address
2093 const Register count = O2; // elements count
2094 const Register offset0 = O4; // element offset
2095 const Register offset8 = O5; // next element offset
2097 __ deccc(count, 2);
2098 __ mov(G0, offset0); // offset from start of arrays (0)
2099 __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
2100 __ delayed()->add(offset0, 8, offset8);
2101 __ align(16);
2102 __ BIND(L_copy_16_bytes);
2103 __ ldx(from, offset0, O3);
2104 __ ldx(from, offset8, G3);
2105 __ deccc(count, 2);
2106 __ stx(O3, to, offset0);
2107 __ inc(offset0, 16);
2108 __ stx(G3, to, offset8);
2109 __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
2110 __ delayed()->inc(offset8, 16);
2112 __ BIND(L_copy_8_bytes);
2113 __ inccc(count, 2);
2114 __ brx(Assembler::zero, true, Assembler::pn, L_exit );
2115 __ delayed()->mov(offset0, offset8); // Set O5 used by other stubs
2116 __ ldx(from, offset0, O3);
2117 __ stx(O3, to, offset0);
2118 __ BIND(L_exit);
2119 }
2121 //
2122 // Generate stub for disjoint long copy.
2123 // "aligned" is ignored, because we must make the stronger
2124 // assumption that both addresses are always 64-bit aligned.
2125 //
2126 // Arguments for generated stub:
2127 // from: O0
2128 // to: O1
2129 // count: O2 treated as signed
2130 //
2131 address generate_disjoint_long_copy(bool aligned, const char * name) {
2132 __ align(CodeEntryAlignment);
2133 StubCodeMark mark(this, "StubRoutines", name);
2134 address start = __ pc();
2136 assert_clean_int(O2, O3); // Make sure 'count' is clean int.
2138 if (!aligned) disjoint_long_copy_entry = __ pc();
2139 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2140 if (!aligned) BLOCK_COMMENT("Entry:");
2142 generate_disjoint_long_copy_core(aligned);
2144 // O3, O4 are used as temp registers
2145 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
2146 __ retl();
2147 __ delayed()->mov(G0, O0); // return 0
2148 return start;
2149 }
2151 //
2152 // Generate core code for conjoint long copy (and oop copy on 64-bit).
2153 // "aligned" is ignored, because we must make the stronger
2154 // assumption that both addresses are always 64-bit aligned.
2155 //
2156 // Arguments:
2157 // from: O0
2158 // to: O1
2159 // count: O2 treated as signed
2160 //
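// Illustrative C equivalent (a sketch, not the emitted code):
//   long i = (count - 1) * 8;             // byte offset of the last element
//   while (i > 0) {                       // two elements per pass,
//     *(jlong*)(to + i)     = *(jlong*)(from + i);    // from the end down
//     *(jlong*)(to + i - 8) = *(jlong*)(from + i - 8);
//     i -= 16;
//   }
//   if (i == 0)                           // odd count: one element left
//     *(jlong*)to = *(jlong*)from;
//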
2161 void generate_conjoint_long_copy_core(bool aligned) {
2162 // Do reverse copy.
2163 Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
2164 const Register from = O0; // source array address
2165 const Register to = O1; // destination array address
2166 const Register count = O2; // elements count
2167 const Register offset8 = O4; // element offset
2168 const Register offset0 = O5; // previous element offset
2170 __ subcc(count, 1, count);
2171 __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes );
2172 __ delayed()->sllx(count, LogBytesPerLong, offset8);
2173 __ sub(offset8, 8, offset0);
2174 __ align(16);
2175 __ BIND(L_copy_16_bytes);
2176 __ ldx(from, offset8, O2);
2177 __ ldx(from, offset0, O3);
2178 __ stx(O2, to, offset8);
2179 __ deccc(offset8, 16); // use offset8 as counter
2180 __ stx(O3, to, offset0);
2181 __ brx(Assembler::greater, false, Assembler::pt, L_copy_16_bytes);
2182 __ delayed()->dec(offset0, 16);
2184 __ BIND(L_copy_8_bytes);
2185 __ brx(Assembler::negative, false, Assembler::pn, L_exit );
2186 __ delayed()->nop();
2187 __ ldx(from, 0, O3);
2188 __ stx(O3, to, 0);
2189 __ BIND(L_exit);
2190 }
2192 // Generate stub for conjoint long copy.
2193 // "aligned" is ignored, because we must make the stronger
2194 // assumption that both addresses are always 64-bit aligned.
2195 //
2196 // Arguments for generated stub:
2197 // from: O0
2198 // to: O1
2199 // count: O2 treated as signed
2200 //
2201 address generate_conjoint_long_copy(bool aligned, const char * name) {
2202 __ align(CodeEntryAlignment);
2203 StubCodeMark mark(this, "StubRoutines", name);
2204 address start = __ pc();
2206 assert(!aligned, "usage");
2207 address nooverlap_target = disjoint_long_copy_entry;
2209 assert_clean_int(O2, O3); // Make sure 'count' is clean int.
2211 if (!aligned) long_copy_entry = __ pc();
2212 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2213 if (!aligned) BLOCK_COMMENT("Entry:");
2215 array_overlap_test(nooverlap_target, 3);
2217 generate_conjoint_long_copy_core(aligned);
2219 // O3, O4 are used as temp registers
2220 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
2221 __ retl();
2222 __ delayed()->mov(G0, O0); // return 0
2223 return start;
2224 }
2226 // Generate stub for disjoint oop copy. If "aligned" is true, the
2227 // "from" and "to" addresses are assumed to be heapword aligned.
2228 //
2229 // Arguments for generated stub:
2230 // from: O0
2231 // to: O1
2232 // count: O2 treated as signed
2233 //
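// Shape of the generated stub (a sketch of the code below):
//   pre_barrier(to, count);          // GC pre-barrier on the dest range
//   <element copy>                   // int copy core with compressed oops,
//                                    // long copy core otherwise on 64-bit
//   post_barrier(to, count);         // card marks for the stored oops
//   return 0;
//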
2234 address generate_disjoint_oop_copy(bool aligned, const char * name) {
2236 const Register from = O0; // source array address
2237 const Register to = O1; // destination array address
2238 const Register count = O2; // elements count
2240 __ align(CodeEntryAlignment);
2241 StubCodeMark mark(this, "StubRoutines", name);
2242 address start = __ pc();
2244 assert_clean_int(count, O3); // Make sure 'count' is clean int.
2246 if (!aligned) disjoint_oop_copy_entry = __ pc();
2247 // caller can pass a 64-bit byte count here
2248 if (!aligned) BLOCK_COMMENT("Entry:");
2250 // save arguments for barrier generation
2251 __ mov(to, G1);
2252 __ mov(count, G5);
2253 gen_write_ref_array_pre_barrier(G1, G5);
2254 #ifdef _LP64
2255 assert_clean_int(count, O3); // Make sure 'count' is clean int.
2256 if (UseCompressedOops) {
2257 generate_disjoint_int_copy_core(aligned);
2258 } else {
2259 generate_disjoint_long_copy_core(aligned);
2260 }
2261 #else
2262 generate_disjoint_int_copy_core(aligned);
2263 #endif
2264 // O0 is used as temp register
2265 gen_write_ref_array_post_barrier(G1, G5, O0);
2267 // O3, O4 are used as temp registers
2268 inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2269 __ retl();
2270 __ delayed()->mov(G0, O0); // return 0
2271 return start;
2272 }
2274 // Generate stub for conjoint oop copy. If "aligned" is true, the
2275 // "from" and "to" addresses are assumed to be heapword aligned.
2276 //
2277 // Arguments for generated stub:
2278 // from: O0
2279 // to: O1
2280 // count: O2 treated as signed
2281 //
2282 address generate_conjoint_oop_copy(bool aligned, const char * name) {
2284 const Register from = O0; // source array address
2285 const Register to = O1; // destination array address
2286 const Register count = O2; // elements count
2288 __ align(CodeEntryAlignment);
2289 StubCodeMark mark(this, "StubRoutines", name);
2290 address start = __ pc();
2292 assert_clean_int(count, O3); // Make sure 'count' is clean int.
2294 if (!aligned) oop_copy_entry = __ pc();
2295 // caller can pass a 64-bit byte count here
2296 if (!aligned) BLOCK_COMMENT("Entry:");
2298 // save arguments for barrier generation
2299 __ mov(to, G1);
2300 __ mov(count, G5);
2302 gen_write_ref_array_pre_barrier(G1, G5);
2304 address nooverlap_target = aligned ?
2305 StubRoutines::arrayof_oop_disjoint_arraycopy() :
2306 disjoint_oop_copy_entry;
2308 array_overlap_test(nooverlap_target, LogBytesPerHeapOop);
2310 #ifdef _LP64
2311 if (UseCompressedOops) {
2312 generate_conjoint_int_copy_core(aligned);
2313 } else {
2314 generate_conjoint_long_copy_core(aligned);
2315 }
2316 #else
2317 generate_conjoint_int_copy_core(aligned);
2318 #endif
2320 // O0 is used as temp register
2321 gen_write_ref_array_post_barrier(G1, G5, O0);
2323 // O3, O4 are used as temp registers
2324 inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2325 __ retl();
2326 __ delayed()->mov(G0, O0); // return 0
2327 return start;
2328 }
2331 // Helper for generating a dynamic type check.
2332 // Smashes only the given temp registers.
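// In rough pseudo-C the emitted check is (illustrative; the slow path
// actually communicates its result through the condition codes):
//   if (sub_klass == super_klass)                            goto L_success;
//   if (*(sub_klass + super_check_offset) == super_klass)    goto L_success;
//   if (super_check_offset != secondary_super_cache_offset)  goto L_miss;
//   if (partial_subtype_check(sub_klass, super_klass) == 0)  goto L_success;
//   // fall through to L_miss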
2333 void generate_type_check(Register sub_klass,
2334 Register super_check_offset,
2335 Register super_klass,
2336 Register temp,
2337 Label& L_success,
2338 Register deccc_hack = noreg) {
2339 assert_different_registers(sub_klass, super_check_offset, super_klass, temp);
2341 BLOCK_COMMENT("type_check:");
2343 Label L_miss;
2345 assert_clean_int(super_check_offset, temp);
2347 // maybe decrement caller's trip count:
2348 #define DELAY_SLOT delayed(); \
2349 { if (deccc_hack == noreg) __ nop(); else __ deccc(deccc_hack); }
2351 // if the pointers are equal, we are done (e.g., String[] elements)
2352 __ cmp(sub_klass, super_klass);
2353 __ brx(Assembler::equal, true, Assembler::pt, L_success);
2354 __ DELAY_SLOT;
2356 // check the supertype display:
2357 __ ld_ptr(sub_klass, super_check_offset, temp); // query the super type
2358 __ cmp(super_klass, temp); // test the super type
2359 __ brx(Assembler::equal, true, Assembler::pt, L_success);
2360 __ DELAY_SLOT;
2362 int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
2363 Klass::secondary_super_cache_offset_in_bytes());
2364 __ cmp(super_check_offset, sc_offset); // was the probe in the secondary super cache slot?
2365 __ brx(Assembler::notEqual, true, Assembler::pt, L_miss);
2366 __ delayed()->nop();
2368 __ save_frame(0);
2369 __ mov(sub_klass->after_save(), O1);
2370 // mov(super_klass->after_save(), O2); //fill delay slot
2371 assert(StubRoutines::Sparc::_partial_subtype_check != NULL, "order of generation");
2372 __ call(StubRoutines::Sparc::_partial_subtype_check);
2373 __ delayed()->mov(super_klass->after_save(), O2);
2374 __ restore();
2376 // Upon return, the condition codes are already set.
2377 __ brx(Assembler::equal, true, Assembler::pt, L_success);
2378 __ DELAY_SLOT;
2380 #undef DELAY_SLOT
2382 // Fall through on failure!
2383 __ BIND(L_miss);
2384 }
2387 // Generate stub for checked oop copy.
2388 //
2389 // Arguments for generated stub:
2390 // from: O0
2391 // to: O1
2392 // count: O2 treated as signed
2393 // ckoff: O3 (super_check_offset)
2394 // ckval: O4 (super_klass)
2395 // ret: O0 zero for success; (-1^K) where K is partial transfer count
2396 //
2397 address generate_checkcast_copy(const char* name) {
2399 const Register O0_from = O0; // source array address
2400 const Register O1_to = O1; // destination array address
2401 const Register O2_count = O2; // elements count
2402 const Register O3_ckoff = O3; // super_check_offset
2403 const Register O4_ckval = O4; // super_klass
2405 const Register O5_offset = O5; // loop var, with stride wordSize
2406 const Register G1_remain = G1; // loop var, with stride -1
2407 const Register G3_oop = G3; // actual oop copied
2408 const Register G4_klass = G4; // oop._klass
2409 const Register G5_super = G5; // oop._klass._primary_supers[ckval]
2411 __ align(CodeEntryAlignment);
2412 StubCodeMark mark(this, "StubRoutines", name);
2413 address start = __ pc();
2415 gen_write_ref_array_pre_barrier(G1, G5);
2418 #ifdef ASSERT
2419 // We sometimes save a frame (see partial_subtype_check below).
2420 // If this will cause trouble, let's fail now instead of later.
2421 __ save_frame(0);
2422 __ restore();
2423 #endif
2425 #ifdef ASSERT
2426 // caller guarantees that the arrays really are different
2427 // otherwise, we would have to make conjoint checks
2428 { Label L;
2429 __ mov(O3, G1); // spill: overlap test smashes O3
2430 __ mov(O4, G4); // spill: overlap test smashes O4
2431 array_overlap_test(L, LogBytesPerHeapOop);
2432 __ stop("checkcast_copy within a single array");
2433 __ bind(L);
2434 __ mov(G1, O3);
2435 __ mov(G4, O4);
2436 }
2437 #endif //ASSERT
2439 assert_clean_int(O2_count, G1); // Make sure 'count' is clean int.
2441 checkcast_copy_entry = __ pc();
2442 // caller can pass a 64-bit byte count here (from generic stub)
2443 BLOCK_COMMENT("Entry:");
2445 Label load_element, store_element, do_card_marks, fail, done;
2446 __ addcc(O2_count, 0, G1_remain); // initialize loop index, and test it
2447 __ brx(Assembler::notZero, false, Assembler::pt, load_element);
2448 __ delayed()->mov(G0, O5_offset); // offset from start of arrays
2450 // Empty array: Nothing to do.
2451 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
2452 __ retl();
2453 __ delayed()->set(0, O0); // return 0 on (trivial) success
2455 // ======== begin loop ========
2456 // (Loop is rotated; its entry is load_element.)
2457 // Loop variables:
2458 // (O5 = 0; ; O5 += heapOopSize) --- offset from src, dest arrays
2459 // (G1 = len; G1 != 0; G1--) --- number of oops *remaining*
2460 // G3, G4, G5 --- current oop, oop.klass, oop.klass.super
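//
// In rough pseudo-C (a sketch of the rotated loop, not the emitted code):
//   while (remain != 0) {
//     oop el = from[offset];
//     if (el != NULL && !type_check(el->klass()))  goto fail;
//     to[offset] = el;  offset += heapOopSize;  remain -= 1;
//   }
//   return 0;        // on failure: ~(number copied), see 'fail' below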
2461 __ align(16);
2463 __ bind(store_element);
2464 // deccc(G1_remain); // decrement the count (hoisted)
2465 __ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop
2466 __ inc(O5_offset, heapOopSize); // step to next offset
2467 __ brx(Assembler::zero, true, Assembler::pt, do_card_marks);
2468 __ delayed()->set(0, O0); // return 0 on success
2470 // ======== loop entry is here ========
2471 __ bind(load_element);
2472 __ load_heap_oop(O0_from, O5_offset, G3_oop); // load the oop
2473 __ br_null(G3_oop, true, Assembler::pt, store_element);
2474 __ delayed()->deccc(G1_remain); // decrement the count
2476 __ load_klass(G3_oop, G4_klass); // query the object klass
2478 generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super,
2479 // branch to this on success:
2480 store_element,
2481 // decrement this on success:
2482 G1_remain);
2483 // ======== end loop ========
2485 // It was a real error; we must depend on the caller to finish the job.
2486 // Register G1 has number of *remaining* oops, O2 number of *total* oops.
2487 // Emit GC store barriers for the oops we have copied (O2 minus G1),
2488 // and report their number to the caller.
2489 __ bind(fail);
2490 __ subcc(O2_count, G1_remain, O2_count);
2491 __ brx(Assembler::zero, false, Assembler::pt, done);
2492 __ delayed()->not1(O2_count, O0); // report (-1^K) to caller
2494 __ bind(do_card_marks);
2495 gen_write_ref_array_post_barrier(O1_to, O2_count, O3); // store check on O1[0..O2]
2497 __ bind(done);
2498 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
2499 __ retl();
2500 __ delayed()->nop(); // return value in O0
2502 return start;
2503 }
2506 // Generate 'unsafe' array copy stub
2507 // Though just as safe as the other stubs, it takes an unscaled
2508 // size_t argument instead of an element count.
2509 //
2510 // Arguments for generated stub:
2511 // from: O0
2512 // to: O1
2513 // count: O2 byte count, treated as ssize_t, can be zero
2514 //
2515 // Examines the alignment of the operands and dispatches
2516 // to a long, int, short, or byte copy loop.
2517 //
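// Dispatch logic, roughly (illustrative):
//   bits = (uintptr_t)from | (uintptr_t)to | count;
//   if ((bits & 7) == 0) goto long_copy  (count >>= 3);
//   if ((bits & 3) == 0) goto int_copy   (count >>= 2);
//   if ((bits & 1) == 0) goto short_copy (count >>= 1);
//   goto byte_copy;
//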
2518 address generate_unsafe_copy(const char* name) {
2520 const Register O0_from = O0; // source array address
2521 const Register O1_to = O1; // destination array address
2522 const Register O2_count = O2; // elements count
2524 const Register G1_bits = G1; // test copy of low bits
2526 __ align(CodeEntryAlignment);
2527 StubCodeMark mark(this, "StubRoutines", name);
2528 address start = __ pc();
2530 // bump this on entry, not on exit:
2531 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, G1, G3);
2533 __ or3(O0_from, O1_to, G1_bits);
2534 __ or3(O2_count, G1_bits, G1_bits);
2536 __ btst(BytesPerLong-1, G1_bits);
2537 __ br(Assembler::zero, true, Assembler::pt,
2538 long_copy_entry, relocInfo::runtime_call_type);
2539 // scale the count on the way out:
2540 __ delayed()->srax(O2_count, LogBytesPerLong, O2_count);
2542 __ btst(BytesPerInt-1, G1_bits);
2543 __ br(Assembler::zero, true, Assembler::pt,
2544 int_copy_entry, relocInfo::runtime_call_type);
2545 // scale the count on the way out:
2546 __ delayed()->srax(O2_count, LogBytesPerInt, O2_count);
2548 __ btst(BytesPerShort-1, G1_bits);
2549 __ br(Assembler::zero, true, Assembler::pt,
2550 short_copy_entry, relocInfo::runtime_call_type);
2551 // scale the count on the way out:
2552 __ delayed()->srax(O2_count, LogBytesPerShort, O2_count);
2554 __ br(Assembler::always, false, Assembler::pt,
2555 byte_copy_entry, relocInfo::runtime_call_type);
2556 __ delayed()->nop();
2558 return start;
2559 }
2562 // Perform range checks on the proposed arraycopy.
2563 // Kills the two temps, but nothing else.
2564 // Also, clean the sign bits of src_pos and dst_pos.
2565 void arraycopy_range_checks(Register src, // source array oop (O0)
2566 Register src_pos, // source position (O1)
2567 Register dst, // destination array oop (O2)
2568 Register dst_pos, // destination position (O3)
2569 Register length, // length of copy (O4)
2570 Register temp1, Register temp2,
2571 Label& L_failed) {
2572 BLOCK_COMMENT("arraycopy_range_checks:");
2574 // if (src_pos + length > arrayOop(src)->length() ) FAIL;
2576 const Register array_length = temp1; // scratch
2577 const Register end_pos = temp2; // scratch
2579 // Note: This next instruction may be in the delay slot of a branch:
2580 __ add(length, src_pos, end_pos); // src_pos + length
2581 __ lduw(src, arrayOopDesc::length_offset_in_bytes(), array_length);
2582 __ cmp(end_pos, array_length);
2583 __ br(Assembler::greater, false, Assembler::pn, L_failed);
2585 // if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
2586 __ delayed()->add(length, dst_pos, end_pos); // dst_pos + length
2587 __ lduw(dst, arrayOopDesc::length_offset_in_bytes(), array_length);
2588 __ cmp(end_pos, array_length);
2589 __ br(Assembler::greater, false, Assembler::pn, L_failed);
2591 // The high 32 bits of 'src_pos' and 'dst_pos' must be cleared.
2592 // A move with sign extension can be used since both are positive.
2593 __ delayed()->signx(src_pos, src_pos);
2594 __ signx(dst_pos, dst_pos);
2596 BLOCK_COMMENT("arraycopy_range_checks done");
2597 }
2600 //
2601 // Generate generic array copy stubs
2602 //
2603 // Input:
2604 // O0 - src oop
2605 // O1 - src_pos
2606 // O2 - dst oop
2607 // O3 - dst_pos
2608 // O4 - element count
2609 //
2610 // Output:
2611 // O0 == 0 - success
2612 // O0 == -1 - need to call System.arraycopy
2613 //
2614 address generate_generic_copy(const char *name) {
2616 Label L_failed, L_objArray;
2618 // Input registers
2619 const Register src = O0; // source array oop
2620 const Register src_pos = O1; // source position
2621 const Register dst = O2; // destination array oop
2622 const Register dst_pos = O3; // destination position
2623 const Register length = O4; // elements count
2625 // registers used as temp
2626 const Register G3_src_klass = G3; // source array klass
2627 const Register G4_dst_klass = G4; // destination array klass
2628 const Register G5_lh = G5; // layout handler
2629 const Register O5_temp = O5;
2631 __ align(CodeEntryAlignment);
2632 StubCodeMark mark(this, "StubRoutines", name);
2633 address start = __ pc();
2635 // bump this on entry, not on exit:
2636 inc_counter_np(SharedRuntime::_generic_array_copy_ctr, G1, G3);
2638 // In principle, the int arguments could be dirty.
2639 //assert_clean_int(src_pos, G1);
2640 //assert_clean_int(dst_pos, G1);
2641 //assert_clean_int(length, G1);
2643 //-----------------------------------------------------------------------
2644 // Assembler stubs will be used for this call to arraycopy
2645 // if the following conditions are met:
2646 //
2647 // (1) src and dst must not be null.
2648 // (2) src_pos must not be negative.
2649 // (3) dst_pos must not be negative.
2650 // (4) length must not be negative.
2651 // (5) src klass and dst klass should be the same and not NULL.
2652 // (6) src and dst should be arrays.
2653 // (7) src_pos + length must not exceed length of src.
2654 // (8) dst_pos + length must not exceed length of dst.
2655 BLOCK_COMMENT("arraycopy initial argument checks");
2657 // if (src == NULL) return -1;
2658 __ br_null(src, false, Assembler::pn, L_failed);
2660 // if (src_pos < 0) return -1;
2661 __ delayed()->tst(src_pos);
2662 __ br(Assembler::negative, false, Assembler::pn, L_failed);
2663 __ delayed()->nop();
2665 // if (dst == NULL) return -1;
2666 __ br_null(dst, false, Assembler::pn, L_failed);
2668 // if (dst_pos < 0) return -1;
2669 __ delayed()->tst(dst_pos);
2670 __ br(Assembler::negative, false, Assembler::pn, L_failed);
2672 // if (length < 0) return -1;
2673 __ delayed()->tst(length);
2674 __ br(Assembler::negative, false, Assembler::pn, L_failed);
2676 BLOCK_COMMENT("arraycopy argument klass checks");
2677 // get src->klass()
2678 if (UseCompressedOops) {
2679 __ delayed()->nop(); // ??? not good
2680 __ load_klass(src, G3_src_klass);
2681 } else {
2682 __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass);
2683 }
2685 #ifdef ASSERT
2686 // assert(src->klass() != NULL);
2687 BLOCK_COMMENT("assert klasses not null");
2688 { Label L_a, L_b;
2689 __ br_notnull(G3_src_klass, false, Assembler::pt, L_b); // it is broken if klass is NULL
2690 __ delayed()->nop();
2691 __ bind(L_a);
2692 __ stop("broken null klass");
2693 __ bind(L_b);
2694 __ load_klass(dst, G4_dst_klass);
2695 __ br_null(G4_dst_klass, false, Assembler::pn, L_a); // this would be broken also
2696 __ delayed()->mov(G0, G4_dst_klass); // scribble the temp
2697 BLOCK_COMMENT("assert done");
2698 }
2699 #endif
2701 // Load layout helper
2702 //
2703 //   |array_tag|     | header_size | element_type |     |log2_element_size|
2704 //    32        30    24            16              8     2                0
2705 //
2706 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2707 //
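// The fields are decoded further below, roughly as (illustrative):
//   header_size = (lh >> _lh_header_size_shift) & _lh_header_size_mask;
//   log2_esize  =  lh & _lh_log2_element_size_mask;
//   src_addr    =  src + header_size + (src_pos << log2_esize);
//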
2709 int lh_offset = klassOopDesc::header_size() * HeapWordSize +
2710 Klass::layout_helper_offset_in_bytes();
2712 // Load 32-bits signed value. Use br() instruction with it to check icc.
2713 __ lduw(G3_src_klass, lh_offset, G5_lh);
2715 if (UseCompressedOops) {
2716 __ load_klass(dst, G4_dst_klass);
2717 }
2718 // Handle objArrays completely differently...
2719 juint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2720 __ set(objArray_lh, O5_temp);
2721 __ cmp(G5_lh, O5_temp);
2722 __ br(Assembler::equal, false, Assembler::pt, L_objArray);
2723 if (UseCompressedOops) {
2724 __ delayed()->nop();
2725 } else {
2726 __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
2727 }
2729 // if (src->klass() != dst->klass()) return -1;
2730 __ cmp(G3_src_klass, G4_dst_klass);
2731 __ brx(Assembler::notEqual, false, Assembler::pn, L_failed);
2732 __ delayed()->nop();
2734 // if (!src->is_Array()) return -1;
2735 __ cmp(G5_lh, Klass::_lh_neutral_value); // < 0
2736 __ br(Assembler::greaterEqual, false, Assembler::pn, L_failed);
2738 // At this point, it is known to be a typeArray (array_tag 0x3).
2739 #ifdef ASSERT
2740 __ delayed()->nop();
2741 { Label L;
2742 jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2743 __ set(lh_prim_tag_in_place, O5_temp);
2744 __ cmp(G5_lh, O5_temp);
2745 __ br(Assembler::greaterEqual, false, Assembler::pt, L);
2746 __ delayed()->nop();
2747 __ stop("must be a primitive array");
2748 __ bind(L);
2749 }
2750 #else
2751 __ delayed(); // match next insn to prev branch
2752 #endif
2754 arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2755 O5_temp, G4_dst_klass, L_failed);
2757 // typeArrayKlass
2758 //
2759 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2760 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2761 //
2763 const Register G4_offset = G4_dst_klass; // array offset
2764 const Register G3_elsize = G3_src_klass; // log2 element size
2766 __ srl(G5_lh, Klass::_lh_header_size_shift, G4_offset);
2767 __ and3(G4_offset, Klass::_lh_header_size_mask, G4_offset); // array_offset
2768 __ add(src, G4_offset, src); // src array offset
2769 __ add(dst, G4_offset, dst); // dst array offset
2770 __ and3(G5_lh, Klass::_lh_log2_element_size_mask, G3_elsize); // log2 element size
2772 // next registers should be set before the jump to corresponding stub
2773 const Register from = O0; // source array address
2774 const Register to = O1; // destination array address
2775 const Register count = O2; // elements count
2777 // 'from', 'to', 'count' registers should be set in this order
2778 // since they are the same as 'src', 'src_pos', 'dst'.
2780 BLOCK_COMMENT("scale indexes to element size");
2781 __ sll_ptr(src_pos, G3_elsize, src_pos);
2782 __ sll_ptr(dst_pos, G3_elsize, dst_pos);
2783 __ add(src, src_pos, from); // src_addr
2784 __ add(dst, dst_pos, to); // dst_addr
2786 BLOCK_COMMENT("choose copy loop based on element size");
2787 __ cmp(G3_elsize, 0);
2788 __ br(Assembler::equal,true,Assembler::pt,StubRoutines::_jbyte_arraycopy);
2789 __ delayed()->signx(length, count); // length
2791 __ cmp(G3_elsize, LogBytesPerShort);
2792 __ br(Assembler::equal,true,Assembler::pt,StubRoutines::_jshort_arraycopy);
2793 __ delayed()->signx(length, count); // length
2795 __ cmp(G3_elsize, LogBytesPerInt);
2796 __ br(Assembler::equal,true,Assembler::pt,StubRoutines::_jint_arraycopy);
2797 __ delayed()->signx(length, count); // length
2798 #ifdef ASSERT
2799 { Label L;
2800 __ cmp(G3_elsize, LogBytesPerLong);
2801 __ br(Assembler::equal, false, Assembler::pt, L);
2802 __ delayed()->nop();
2803 __ stop("must be long copy, but elsize is wrong");
2804 __ bind(L);
2805 }
2806 #endif
2807 __ br(Assembler::always,false,Assembler::pt,StubRoutines::_jlong_arraycopy);
2808 __ delayed()->signx(length, count); // length
2810 // objArrayKlass
2811 __ BIND(L_objArray);
2812 // live at this point: G3_src_klass, G4_dst_klass, src[_pos], dst[_pos], length
2814 Label L_plain_copy, L_checkcast_copy;
2815 // test array classes for subtyping
2816 __ cmp(G3_src_klass, G4_dst_klass); // usual case is exact equality
2817 __ brx(Assembler::notEqual, true, Assembler::pn, L_checkcast_copy);
2818 __ delayed()->lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted from below
2820 // Identically typed arrays can be copied without element-wise checks.
2821 arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2822 O5_temp, G5_lh, L_failed);
2824 __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
2825 __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
2826 __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
2827 __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
2828 __ add(src, src_pos, from); // src_addr
2829 __ add(dst, dst_pos, to); // dst_addr
2830 __ BIND(L_plain_copy);
2831 __ br(Assembler::always, false, Assembler::pt,StubRoutines::_oop_arraycopy);
2832 __ delayed()->signx(length, count); // length
2834 __ BIND(L_checkcast_copy);
2835 // live at this point: G3_src_klass, G4_dst_klass
2836 {
2837 // Before looking at dst.length, make sure dst is also an objArray.
2838 // lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted to delay slot
2839 __ cmp(G5_lh, O5_temp);
2840 __ br(Assembler::notEqual, false, Assembler::pn, L_failed);
2842 // It is safe to examine both src.length and dst.length.
2843 __ delayed(); // match next insn to prev branch
2844 arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2845 O5_temp, G5_lh, L_failed);
2847 // Marshal the base address arguments now, freeing registers.
2848 __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
2849 __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
2850 __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
2851 __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
2852 __ add(src, src_pos, from); // src_addr
2853 __ add(dst, dst_pos, to); // dst_addr
2854 __ signx(length, count); // length (reloaded)
2856 Register sco_temp = O3; // this register is free now
2857 assert_different_registers(from, to, count, sco_temp,
2858 G4_dst_klass, G3_src_klass);
2860 // Generate the type check.
2861 int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
2862 Klass::super_check_offset_offset_in_bytes());
2863 __ lduw(G4_dst_klass, sco_offset, sco_temp);
2864 generate_type_check(G3_src_klass, sco_temp, G4_dst_klass,
2865 O5_temp, L_plain_copy);
2867 // Fetch destination element klass from the objArrayKlass header.
2868 int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
2869 objArrayKlass::element_klass_offset_in_bytes());
2871 // the checkcast_copy loop needs two extra arguments:
2872 __ ld_ptr(G4_dst_klass, ek_offset, O4); // dest elem klass
2873 // lduw(O4, sco_offset, O3); // sco of elem klass
2875 __ br(Assembler::always, false, Assembler::pt, checkcast_copy_entry);
2876 __ delayed()->lduw(O4, sco_offset, O3);
2877 }
2879 __ BIND(L_failed);
2880 __ retl();
2881 __ delayed()->sub(G0, 1, O0); // return -1
2882 return start;
2883 }
2885 void generate_arraycopy_stubs() {
2887 // Note: the disjoint stubs must be generated first, since some of
2888 // the conjoint stubs use them.
2889 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
2890 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
2891 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
2892 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy");
2893 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy");
2894 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy");
2895 StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
2896 StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy");
2897 StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy");
2898 StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy");
2900 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
2901 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy");
2902 StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, "jint_arraycopy");
2903 StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy(false, "jlong_arraycopy");
2904 StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(false, "oop_arraycopy");
2905 StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy");
2906 StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
2907 #ifdef _LP64
2908 // since sizeof(jint) < sizeof(HeapWord), there's a different flavor:
2909 StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy");
2910 #else
2911 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy;
2912 #endif
2913 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;
2914 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;
2916 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
2917 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy");
2918 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy");
2919 }
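// Illustrative call shape from runtime C++ code (a sketch; the actual
// callers live elsewhere in the VM):
//   typedef void (*arraycopy_fn)(HeapWord* from, HeapWord* to, size_t count);
//   arraycopy_fn copy = CAST_TO_FN_PTR(arraycopy_fn,
//                                      StubRoutines::jint_arraycopy());
//   copy(src_addr, dst_addr, elem_count);   // args arrive in O0/O1/O2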
2921 void generate_initial() {
2922 // Generates all stubs and initializes the entry points
2924 //------------------------------------------------------------------------------------------------------------------------
2925 // entry points that exist in all platforms
2926 // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
2927 // the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
2928 StubRoutines::_forward_exception_entry = generate_forward_exception();
2930 StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address);
2931 StubRoutines::_catch_exception_entry = generate_catch_exception();
2933 //------------------------------------------------------------------------------------------------------------------------
2934 // entry points that are platform specific
2935 StubRoutines::Sparc::_test_stop_entry = generate_test_stop();
2937 StubRoutines::Sparc::_stop_subroutine_entry = generate_stop_subroutine();
2938 StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows();
2940 #if !defined(COMPILER2) && !defined(_LP64)
2941 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
2942 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
2943 StubRoutines::_atomic_add_entry = generate_atomic_add();
2944 StubRoutines::_atomic_xchg_ptr_entry = StubRoutines::_atomic_xchg_entry;
2945 StubRoutines::_atomic_cmpxchg_ptr_entry = StubRoutines::_atomic_cmpxchg_entry;
2946 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
2947 StubRoutines::_atomic_add_ptr_entry = StubRoutines::_atomic_add_entry;
2948 StubRoutines::_fence_entry = generate_fence();
2949 #endif // COMPILER2 !=> _LP64
2951 StubRoutines::Sparc::_partial_subtype_check = generate_partial_subtype_check();
2952 }
2955 void generate_all() {
2956 // Generates all stubs and initializes the entry points
2958 // These entry points require SharedInfo::stack0 to be set up in non-core builds
2959 StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
2960 StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
2961 StubRoutines::_throw_ArithmeticException_entry = generate_throw_exception("ArithmeticException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_ArithmeticException), true);
2962 StubRoutines::_throw_NullPointerException_entry = generate_throw_exception("NullPointerException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException), true);
2963 StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
2964 StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
2966 StubRoutines::_handler_for_unsafe_access_entry =
2967 generate_handler_for_unsafe_access();
2969 // support for verify_oop (must happen after universe_init)
2970 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine();
2972 // arraycopy stubs used by compilers
2973 generate_arraycopy_stubs();
2974 }
2977 public:
2978 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
2979 // replace the standard masm with a special one:
2980 _masm = new MacroAssembler(code);
2982 _stub_count = !all ? 0x100 : 0x200;
2983 if (all) {
2984 generate_all();
2985 } else {
2986 generate_initial();
2987 }
2989 // make sure this stub is available for all local calls
2990 if (_atomic_add_stub.is_unbound()) {
2991 // generate a second time, if necessary
2992 (void) generate_atomic_add();
2993 }
2994 }
2997 private:
2998 int _stub_count;
2999 void stub_prolog(StubCodeDesc* cdesc) {
3000 # ifdef ASSERT
3001 // put extra information in the stub code, to make it more readable
3002 #ifdef _LP64
3003 // Write the high part of the address
3004 // [RGV] Check if there is a dependency on the size of this prolog
3005 __ emit_data((intptr_t)cdesc >> 32, relocInfo::none);
3006 #endif
3007 __ emit_data((intptr_t)cdesc, relocInfo::none);
3008 __ emit_data(++_stub_count, relocInfo::none);
3009 # endif
3010 align(true);
3011 }
3013 void align(bool at_header = false) {
3014 // %%%%% move this constant somewhere else
3015 // UltraSPARC cache line size is 8 instructions:
3016 const unsigned int icache_line_size = 32;
3017 const unsigned int icache_half_line_size = 16;
3019 if (at_header) {
3020 while ((intptr_t)(__ pc()) % icache_line_size != 0) {
3021 __ emit_data(0, relocInfo::none);
3022 }
3023 } else {
3024 while ((intptr_t)(__ pc()) % icache_half_line_size != 0) {
3025 __ nop();
3026 }
3027 }
3028 }
3030 }; // end class declaration
3033 address StubGenerator::disjoint_byte_copy_entry = NULL;
3034 address StubGenerator::disjoint_short_copy_entry = NULL;
3035 address StubGenerator::disjoint_int_copy_entry = NULL;
3036 address StubGenerator::disjoint_long_copy_entry = NULL;
3037 address StubGenerator::disjoint_oop_copy_entry = NULL;
3039 address StubGenerator::byte_copy_entry = NULL;
3040 address StubGenerator::short_copy_entry = NULL;
3041 address StubGenerator::int_copy_entry = NULL;
3042 address StubGenerator::long_copy_entry = NULL;
3043 address StubGenerator::oop_copy_entry = NULL;
3045 address StubGenerator::checkcast_copy_entry = NULL;
3047 void StubGenerator_generate(CodeBuffer* code, bool all) {
3048 StubGenerator g(code, all);
3049 }