Wed, 16 Feb 2011 13:30:31 -0800
7013964: openjdk LICENSE file needs rebranding
Reviewed-by: darcy, katleman, jjg
1 /*
2 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "asm/assembler.hpp"
27 #include "assembler_x86.inline.hpp"
28 #include "interpreter/interpreter.hpp"
29 #include "nativeInst_x86.hpp"
30 #include "oops/instanceOop.hpp"
31 #include "oops/methodOop.hpp"
32 #include "oops/objArrayKlass.hpp"
33 #include "oops/oop.inline.hpp"
34 #include "prims/methodHandles.hpp"
35 #include "runtime/frame.inline.hpp"
36 #include "runtime/handles.inline.hpp"
37 #include "runtime/sharedRuntime.hpp"
38 #include "runtime/stubCodeGenerator.hpp"
39 #include "runtime/stubRoutines.hpp"
40 #include "utilities/top.hpp"
41 #ifdef TARGET_OS_FAMILY_linux
42 # include "thread_linux.inline.hpp"
43 #endif
44 #ifdef TARGET_OS_FAMILY_solaris
45 # include "thread_solaris.inline.hpp"
46 #endif
47 #ifdef TARGET_OS_FAMILY_windows
48 # include "thread_windows.inline.hpp"
49 #endif
50 #ifdef COMPILER2
51 #include "opto/runtime.hpp"
52 #endif
54 // Declaration and definition of StubGenerator (no .hpp file).
55 // For a more detailed description of the stub routine structure
56 // see the comment in stubRoutines.hpp
58 #define __ _masm->
59 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
60 #define a__ ((Assembler*)_masm)->
62 #ifdef PRODUCT
63 #define BLOCK_COMMENT(str) /* nothing */
64 #else
65 #define BLOCK_COMMENT(str) __ block_comment(str)
66 #endif
68 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
69 const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions
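// Illustrative sketch only (not used by the stubs): MXCSR bits 0..5 are the sticky
// exception status flags (IE, DE, ZE, OE, UE, PE), so masking with MXCSR_MASK
// (0xFFC0) drops them and leaves only the exception-mask, DAZ/FZ and
// rounding-control bits for the comparisons made below.
static inline int mxcsr_control_bits(int mxcsr) {
  return mxcsr & MXCSR_MASK;   // keep control/mask bits, discard status flags
}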
71 // Stub Code definitions
73 static address handle_unsafe_access() {
74 JavaThread* thread = JavaThread::current();
75 address pc = thread->saved_exception_pc();
76 // pc is the instruction which we must emulate
77 // doing a no-op is fine: return garbage from the load
78 // therefore, compute npc
79 address npc = Assembler::locate_next_instruction(pc);
81 // request an async exception
82 thread->set_pending_unsafe_access_error();
84 // return address of next instruction to execute
85 return npc;
86 }
88 class StubGenerator: public StubCodeGenerator {
89 private:
91 #ifdef PRODUCT
92 #define inc_counter_np(counter) (0)
93 #else
94 void inc_counter_np_(int& counter) {
95 __ incrementl(ExternalAddress((address)&counter));
96 }
97 #define inc_counter_np(counter) \
98 BLOCK_COMMENT("inc_counter " #counter); \
99 inc_counter_np_(counter);
100 #endif
102 // Call stubs are used to call Java from C
103 //
104 // Linux Arguments:
105 // c_rarg0: call wrapper address (address)
106 // c_rarg1: result (address)
107 // c_rarg2: result type (BasicType)
108 // c_rarg3: method (methodOop)
109 // c_rarg4: (interpreter) entry point (address)
110 // c_rarg5: parameters (intptr_t*)
111 // 16(rbp): parameter size (in words) (int)
112 // 24(rbp): thread (Thread*)
113 //
114 // [ return_from_Java ] <--- rsp
115 // [ argument word n ]
116 // ...
117 // -12 [ argument word 1 ]
118 // -11 [ saved r15 ] <--- rsp_after_call
119 // -10 [ saved r14 ]
120 // -9 [ saved r13 ]
121 // -8 [ saved r12 ]
122 // -7 [ saved rbx ]
123 // -6 [ call wrapper ]
124 // -5 [ result ]
125 // -4 [ result type ]
126 // -3 [ method ]
127 // -2 [ entry point ]
128 // -1 [ parameters ]
129 // 0 [ saved rbp ] <--- rbp
130 // 1 [ return address ]
131 // 2 [ parameter size ]
132 // 3 [ thread ]
133 //
134 // Windows Arguments:
135 // c_rarg0: call wrapper address (address)
136 // c_rarg1: result (address)
137 // c_rarg2: result type (BasicType)
138 // c_rarg3: method (methodOop)
139 // 48(rbp): (interpreter) entry point (address)
140 // 56(rbp): parameters (intptr_t*)
141 // 64(rbp): parameter size (in words) (int)
142 // 72(rbp): thread (Thread*)
143 //
144 // [ return_from_Java ] <--- rsp
145 // [ argument word n ]
146 // ...
147 // -8 [ argument word 1 ]
148 // -7 [ saved r15 ] <--- rsp_after_call
149 // -6 [ saved r14 ]
150 // -5 [ saved r13 ]
151 // -4 [ saved r12 ]
152 // -3 [ saved rdi ]
153 // -2 [ saved rsi ]
154 // -1 [ saved rbx ]
155 // 0 [ saved rbp ] <--- rbp
156 // 1 [ return address ]
157 // 2 [ call wrapper ]
158 // 3 [ result ]
159 // 4 [ result type ]
160 // 5 [ method ]
161 // 6 [ entry point ]
162 // 7 [ parameters ]
163 // 8 [ parameter size ]
164 // 9 [ thread ]
165 //
166 // Windows reserves the caller's stack space for arguments 1-4.
167 // We spill c_rarg0-c_rarg3 to this space.
169 // Call stub stack layout word offsets from rbp
170 enum call_stub_layout {
171 #ifdef _WIN64
172 rsp_after_call_off = -7,
173 r15_off = rsp_after_call_off,
174 r14_off = -6,
175 r13_off = -5,
176 r12_off = -4,
177 rdi_off = -3,
178 rsi_off = -2,
179 rbx_off = -1,
180 rbp_off = 0,
181 retaddr_off = 1,
182 call_wrapper_off = 2,
183 result_off = 3,
184 result_type_off = 4,
185 method_off = 5,
186 entry_point_off = 6,
187 parameters_off = 7,
188 parameter_size_off = 8,
189 thread_off = 9
190 #else
191 rsp_after_call_off = -12,
192 mxcsr_off = rsp_after_call_off,
193 r15_off = -11,
194 r14_off = -10,
195 r13_off = -9,
196 r12_off = -8,
197 rbx_off = -7,
198 call_wrapper_off = -6,
199 result_off = -5,
200 result_type_off = -4,
201 method_off = -3,
202 entry_point_off = -2,
203 parameters_off = -1,
204 rbp_off = 0,
205 retaddr_off = 1,
206 parameter_size_off = 2,
207 thread_off = 3
208 #endif
209 };
211 address generate_call_stub(address& return_address) {
212 assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
213 (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
214 "adjust this code");
215 StubCodeMark mark(this, "StubRoutines", "call_stub");
216 address start = __ pc();
218 // same as in generate_catch_exception()!
219 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
221 const Address call_wrapper (rbp, call_wrapper_off * wordSize);
222 const Address result (rbp, result_off * wordSize);
223 const Address result_type (rbp, result_type_off * wordSize);
224 const Address method (rbp, method_off * wordSize);
225 const Address entry_point (rbp, entry_point_off * wordSize);
226 const Address parameters (rbp, parameters_off * wordSize);
227 const Address parameter_size(rbp, parameter_size_off * wordSize);
229 // same as in generate_catch_exception()!
230 const Address thread (rbp, thread_off * wordSize);
232 const Address r15_save(rbp, r15_off * wordSize);
233 const Address r14_save(rbp, r14_off * wordSize);
234 const Address r13_save(rbp, r13_off * wordSize);
235 const Address r12_save(rbp, r12_off * wordSize);
236 const Address rbx_save(rbp, rbx_off * wordSize);
238 // stub code
239 __ enter();
240 __ subptr(rsp, -rsp_after_call_off * wordSize);
242 // save register parameters
243 #ifndef _WIN64
244 __ movptr(parameters, c_rarg5); // parameters
245 __ movptr(entry_point, c_rarg4); // entry_point
246 #endif
248 __ movptr(method, c_rarg3); // method
249 __ movl(result_type, c_rarg2); // result type
250 __ movptr(result, c_rarg1); // result
251 __ movptr(call_wrapper, c_rarg0); // call wrapper
253 // save regs belonging to calling function
254 __ movptr(rbx_save, rbx);
255 __ movptr(r12_save, r12);
256 __ movptr(r13_save, r13);
257 __ movptr(r14_save, r14);
258 __ movptr(r15_save, r15);
260 #ifdef _WIN64
261 const Address rdi_save(rbp, rdi_off * wordSize);
262 const Address rsi_save(rbp, rsi_off * wordSize);
264 __ movptr(rsi_save, rsi);
265 __ movptr(rdi_save, rdi);
266 #else
267 const Address mxcsr_save(rbp, mxcsr_off * wordSize);
268 {
269 Label skip_ldmx;
270 __ stmxcsr(mxcsr_save);
271 __ movl(rax, mxcsr_save);
272 __ andl(rax, MXCSR_MASK); // Only check control and mask bits
273 ExternalAddress mxcsr_std(StubRoutines::x86::mxcsr_std());
274 __ cmp32(rax, mxcsr_std);
275 __ jcc(Assembler::equal, skip_ldmx);
276 __ ldmxcsr(mxcsr_std);
277 __ bind(skip_ldmx);
278 }
279 #endif
281 // Load up thread register
282 __ movptr(r15_thread, thread);
283 __ reinit_heapbase();
285 #ifdef ASSERT
286 // make sure we have no pending exceptions
287 {
288 Label L;
289 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
290 __ jcc(Assembler::equal, L);
291 __ stop("StubRoutines::call_stub: entered with pending exception");
292 __ bind(L);
293 }
294 #endif
296 // pass parameters if any
297 BLOCK_COMMENT("pass parameters if any");
298 Label parameters_done;
299 __ movl(c_rarg3, parameter_size);
300 __ testl(c_rarg3, c_rarg3);
301 __ jcc(Assembler::zero, parameters_done);
303 Label loop;
304 __ movptr(c_rarg2, parameters); // parameter pointer
305 __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1
306 __ BIND(loop);
307 __ movptr(rax, Address(c_rarg2, 0));// get parameter
308 __ addptr(c_rarg2, wordSize); // advance to next parameter
309 __ decrementl(c_rarg1); // decrement counter
310 __ push(rax); // pass parameter
311 __ jcc(Assembler::notZero, loop);
313 // call Java function
314 __ BIND(parameters_done);
315 __ movptr(rbx, method); // get methodOop
316 __ movptr(c_rarg1, entry_point); // get entry_point
317 __ mov(r13, rsp); // set sender sp
318 BLOCK_COMMENT("call Java function");
319 __ call(c_rarg1);
321 BLOCK_COMMENT("call_stub_return_address:");
322 return_address = __ pc();
324 // store result depending on type (everything that is not
325 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
326 __ movptr(c_rarg0, result);
327 Label is_long, is_float, is_double, exit;
328 __ movl(c_rarg1, result_type);
329 __ cmpl(c_rarg1, T_OBJECT);
330 __ jcc(Assembler::equal, is_long);
331 __ cmpl(c_rarg1, T_LONG);
332 __ jcc(Assembler::equal, is_long);
333 __ cmpl(c_rarg1, T_FLOAT);
334 __ jcc(Assembler::equal, is_float);
335 __ cmpl(c_rarg1, T_DOUBLE);
336 __ jcc(Assembler::equal, is_double);
338 // handle T_INT case
339 __ movl(Address(c_rarg0, 0), rax);
341 __ BIND(exit);
343 // pop parameters
344 __ lea(rsp, rsp_after_call);
346 #ifdef ASSERT
347 // verify that threads correspond
348 {
349 Label L, S;
350 __ cmpptr(r15_thread, thread);
351 __ jcc(Assembler::notEqual, S);
352 __ get_thread(rbx);
353 __ cmpptr(r15_thread, rbx);
354 __ jcc(Assembler::equal, L);
355 __ bind(S);
356 __ jcc(Assembler::equal, L);
357 __ stop("StubRoutines::call_stub: threads must correspond");
358 __ bind(L);
359 }
360 #endif
362 // restore regs belonging to calling function
363 __ movptr(r15, r15_save);
364 __ movptr(r14, r14_save);
365 __ movptr(r13, r13_save);
366 __ movptr(r12, r12_save);
367 __ movptr(rbx, rbx_save);
369 #ifdef _WIN64
370 __ movptr(rdi, rdi_save);
371 __ movptr(rsi, rsi_save);
372 #else
373 __ ldmxcsr(mxcsr_save);
374 #endif
376 // restore rsp
377 __ addptr(rsp, -rsp_after_call_off * wordSize);
379 // return
380 __ pop(rbp);
381 __ ret(0);
383 // handle return types different from T_INT
384 __ BIND(is_long);
385 __ movq(Address(c_rarg0, 0), rax);
386 __ jmp(exit);
388 __ BIND(is_float);
389 __ movflt(Address(c_rarg0, 0), xmm0);
390 __ jmp(exit);
392 __ BIND(is_double);
393 __ movdbl(Address(c_rarg0, 0), xmm0);
394 __ jmp(exit);
396 return start;
397 }
399 // Return point for a Java call if there's an exception thrown in
400 // Java code. The exception is caught and transformed into a
401 // pending exception stored in JavaThread that can be tested from
402 // within the VM.
403 //
404 // Note: Usually the parameters are removed by the callee. In case
405 // of an exception crossing an activation frame boundary, that is
406 // not the case if the callee is compiled code => need to setup the
407 // rsp.
408 //
409 // rax: exception oop
411 address generate_catch_exception() {
412 StubCodeMark mark(this, "StubRoutines", "catch_exception");
413 address start = __ pc();
415 // same as in generate_call_stub():
416 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
417 const Address thread (rbp, thread_off * wordSize);
419 #ifdef ASSERT
420 // verify that threads correspond
421 {
422 Label L, S;
423 __ cmpptr(r15_thread, thread);
424 __ jcc(Assembler::notEqual, S);
425 __ get_thread(rbx);
426 __ cmpptr(r15_thread, rbx);
427 __ jcc(Assembler::equal, L);
428 __ bind(S);
429 __ stop("StubRoutines::catch_exception: threads must correspond");
430 __ bind(L);
431 }
432 #endif
434 // set pending exception
435 __ verify_oop(rax);
437 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
438 __ lea(rscratch1, ExternalAddress((address)__FILE__));
439 __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
440 __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);
442 // complete return to VM
443 assert(StubRoutines::_call_stub_return_address != NULL,
444 "_call_stub_return_address must have been generated before");
445 __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));
447 return start;
448 }
450 // Continuation point for runtime calls returning with a pending
451 // exception. The pending exception check happened in the runtime
452 // or native call stub. The pending exception in Thread is
453 // converted into a Java-level exception.
454 //
455 // Contract with Java-level exception handlers:
456 // rax: exception
457 // rdx: throwing pc
458 //
459 // NOTE: At entry of this stub, exception-pc must be on stack !!
461 address generate_forward_exception() {
462 StubCodeMark mark(this, "StubRoutines", "forward exception");
463 address start = __ pc();
465 // Upon entry, the sp points to the return address returning into
466 // Java (interpreted or compiled) code; i.e., the return address
467 // becomes the throwing pc.
468 //
469 // Arguments pushed before the runtime call are still on the stack
470 // but the exception handler will reset the stack pointer ->
471 // ignore them. A potential result in registers can be ignored as
472 // well.
474 #ifdef ASSERT
475 // make sure this code is only executed if there is a pending exception
476 {
477 Label L;
478 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
479 __ jcc(Assembler::notEqual, L);
480 __ stop("StubRoutines::forward exception: no pending exception (1)");
481 __ bind(L);
482 }
483 #endif
485 // compute exception handler into rbx
486 __ movptr(c_rarg0, Address(rsp, 0));
487 BLOCK_COMMENT("call exception_handler_for_return_address");
488 __ call_VM_leaf(CAST_FROM_FN_PTR(address,
489 SharedRuntime::exception_handler_for_return_address),
490 r15_thread, c_rarg0);
491 __ mov(rbx, rax);
493 // setup rax & rdx, remove return address & clear pending exception
494 __ pop(rdx);
495 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
496 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
498 #ifdef ASSERT
499 // make sure exception is set
500 {
501 Label L;
502 __ testptr(rax, rax);
503 __ jcc(Assembler::notEqual, L);
504 __ stop("StubRoutines::forward exception: no pending exception (2)");
505 __ bind(L);
506 }
507 #endif
509 // continue at exception handler (return address removed)
510 // rax: exception
511 // rbx: exception handler
512 // rdx: throwing pc
513 __ verify_oop(rax);
514 __ jmp(rbx);
516 return start;
517 }
519 // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
520 //
521 // Arguments :
522 // c_rarg0: exchange_value
523 // c_rarg1: dest
524 //
525 // Result:
526 // *dest <- exchange_value, return (original *dest)
527 address generate_atomic_xchg() {
528 StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
529 address start = __ pc();
531 __ movl(rax, c_rarg0); // Copy to eax; we need a return value anyhow
532 __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
533 __ ret(0);
535 return start;
536 }
538 // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
539 //
540 // Arguments :
541 // c_rarg0: exchange_value
542 // c_rarg1: dest
543 //
544 // Result:
545 // *dest <- exchange_value, return (original *dest)
546 address generate_atomic_xchg_ptr() {
547 StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
548 address start = __ pc();
550 __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
551 __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
552 __ ret(0);
554 return start;
555 }
557 // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
558 // jint compare_value)
559 //
560 // Arguments :
561 // c_rarg0: exchange_value
562 // c_rarg1: dest
563 // c_rarg2: compare_value
564 //
565 // Result:
566 // if (compare_value == *dest) {
567 // *dest = exchange_value;
568 // return compare_value;
569 // } else
570 // return *dest;
571 address generate_atomic_cmpxchg() {
572 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
573 address start = __ pc();
575 __ movl(rax, c_rarg2);
576 if ( os::is_MP() ) __ lock();
577 __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
578 __ ret(0);
580 return start;
581 }
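  // Illustrative sketch only (not generated code, and not itself atomic): the stub
  // above achieves the same effect with a single lock-prefixed cmpxchgl, using rax
  // as both the comparand and the return value.
  static jint cmpxchg_semantics_sketch(jint exchange_value, volatile jint* dest, jint compare_value) {
    jint old = *dest;                 // cmpxchgl compares *dest against rax (== compare_value)
    if (old == compare_value) {
      *dest = exchange_value;         // the store happens only on a match
    }
    return old;                       // rax ends up holding the original *dest either way
  }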
583 // Support for jlong atomic::atomic_cmpxchg_long(jlong exchange_value,
584 // volatile jlong* dest,
585 // jlong compare_value)
586 // Arguments :
587 // c_rarg0: exchange_value
588 // c_rarg1: dest
589 // c_rarg2: compare_value
590 //
591 // Result:
592 // if (compare_value == *dest) {
593 // *dest = exchange_value;
594 // return compare_value;
595 // } else
596 // return *dest;
597 address generate_atomic_cmpxchg_long() {
598 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
599 address start = __ pc();
601 __ movq(rax, c_rarg2);
602 if ( os::is_MP() ) __ lock();
603 __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
604 __ ret(0);
606 return start;
607 }
609 // Support for jint atomic::add(jint add_value, volatile jint* dest)
610 //
611 // Arguments :
612 // c_rarg0: add_value
613 // c_rarg1: dest
614 //
615 // Result:
616 // *dest += add_value
617 // return *dest;
618 address generate_atomic_add() {
619 StubCodeMark mark(this, "StubRoutines", "atomic_add");
620 address start = __ pc();
622 __ movl(rax, c_rarg0);
623 if ( os::is_MP() ) __ lock();
624 __ xaddl(Address(c_rarg1, 0), c_rarg0);
625 __ addl(rax, c_rarg0);
626 __ ret(0);
628 return start;
629 }
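  // Illustrative sketch only (not generated code, and not itself atomic): lock xaddl
  // leaves the previous value of *dest in its register operand, so the stub adds
  // add_value once more to return the updated value the contract requires.
  static jint atomic_add_semantics_sketch(jint add_value, volatile jint* dest) {
    jint old = *dest;                 // what xaddl hands back in the register operand
    *dest = old + add_value;          // memory is updated to old + add_value
    return old + add_value;           // the stub returns the new value, not the old one
  }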
631 // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
632 //
633 // Arguments :
634 // c_rarg0: add_value
635 // c_rarg1: dest
636 //
637 // Result:
638 // *dest += add_value
639 // return *dest;
640 address generate_atomic_add_ptr() {
641 StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
642 address start = __ pc();
644 __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
645 if ( os::is_MP() ) __ lock();
646 __ xaddptr(Address(c_rarg1, 0), c_rarg0);
647 __ addptr(rax, c_rarg0);
648 __ ret(0);
650 return start;
651 }
653 // Support for void OrderAccess::fence()
654 //
655 // Arguments :
656 //
657 // Result:
658 address generate_orderaccess_fence() {
659 StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
660 address start = __ pc();
661 __ membar(Assembler::StoreLoad);
662 __ ret(0);
664 return start;
665 }
667 // Support for intptr_t get_previous_fp()
668 //
669 // This routine is used to find the previous frame pointer for the
670 // caller (current_frame_guess). This is used as part of debugging
671 // when ps() is seemingly lost trying to find frames.
672 // This code assumes that the caller (current_frame_guess) has a frame.
673 address generate_get_previous_fp() {
674 StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
675 const Address old_fp(rbp, 0);
676 const Address older_fp(rax, 0);
677 address start = __ pc();
679 __ enter();
680 __ movptr(rax, old_fp); // callers fp
681 __ movptr(rax, older_fp); // the frame for ps()
682 __ pop(rbp);
683 __ ret(0);
685 return start;
686 }
688 //----------------------------------------------------------------------------------------------------
689 // Support for void verify_mxcsr()
690 //
691 // This routine is used with -Xcheck:jni to verify that native
692 // JNI code does not return to Java code without restoring the
693 // MXCSR register to our expected state.
695 address generate_verify_mxcsr() {
696 StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
697 address start = __ pc();
699 const Address mxcsr_save(rsp, 0);
701 if (CheckJNICalls) {
702 Label ok_ret;
703 __ push(rax);
704 __ subptr(rsp, wordSize); // allocate a temp location
705 __ stmxcsr(mxcsr_save);
706 __ movl(rax, mxcsr_save);
707 __ andl(rax, MXCSR_MASK); // Only check control and mask bits
708 __ cmpl(rax, *(int *)(StubRoutines::x86::mxcsr_std()));
709 __ jcc(Assembler::equal, ok_ret);
711 __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");
713 __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
715 __ bind(ok_ret);
716 __ addptr(rsp, wordSize);
717 __ pop(rax);
718 }
720 __ ret(0);
722 return start;
723 }
725 address generate_f2i_fixup() {
726 StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
727 Address inout(rsp, 5 * wordSize); // return address + 4 saves
729 address start = __ pc();
731 Label L;
733 __ push(rax);
734 __ push(c_rarg3);
735 __ push(c_rarg2);
736 __ push(c_rarg1);
738 __ movl(rax, 0x7f800000);
739 __ xorl(c_rarg3, c_rarg3);
740 __ movl(c_rarg2, inout);
741 __ movl(c_rarg1, c_rarg2);
742 __ andl(c_rarg1, 0x7fffffff);
743 __ cmpl(rax, c_rarg1); // NaN? -> 0
744 __ jcc(Assembler::negative, L);
745 __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
746 __ movl(c_rarg3, 0x80000000);
747 __ movl(rax, 0x7fffffff);
748 __ cmovl(Assembler::positive, c_rarg3, rax);
750 __ bind(L);
751 __ movptr(inout, c_rarg3);
753 __ pop(c_rarg1);
754 __ pop(c_rarg2);
755 __ pop(c_rarg3);
756 __ pop(rax);
758 __ ret(0);
760 return start;
761 }
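  // Illustrative sketch only (not generated code): the fixup above is reached only
  // when cvttss2si produced the "integer indefinite" value 0x80000000; it rewrites
  // the stacked result to what the Java f2i conversion requires.
  static jint f2i_fixup_semantics_sketch(jfloat x) {
    jint bits = jint_cast(x);                          // raw IEEE-754 bits of the operand
    if ((bits & 0x7fffffff) > 0x7f800000) return 0;    // NaN converts to 0
    return (bits < 0) ? min_jint : max_jint;           // overflow saturates by sign
  }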
763 address generate_f2l_fixup() {
764 StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
765 Address inout(rsp, 5 * wordSize); // return address + 4 saves
766 address start = __ pc();
768 Label L;
770 __ push(rax);
771 __ push(c_rarg3);
772 __ push(c_rarg2);
773 __ push(c_rarg1);
775 __ movl(rax, 0x7f800000);
776 __ xorl(c_rarg3, c_rarg3);
777 __ movl(c_rarg2, inout);
778 __ movl(c_rarg1, c_rarg2);
779 __ andl(c_rarg1, 0x7fffffff);
780 __ cmpl(rax, c_rarg1); // NaN? -> 0
781 __ jcc(Assembler::negative, L);
782 __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
783 __ mov64(c_rarg3, 0x8000000000000000);
784 __ mov64(rax, 0x7fffffffffffffff);
785 __ cmov(Assembler::positive, c_rarg3, rax);
787 __ bind(L);
788 __ movptr(inout, c_rarg3);
790 __ pop(c_rarg1);
791 __ pop(c_rarg2);
792 __ pop(c_rarg3);
793 __ pop(rax);
795 __ ret(0);
797 return start;
798 }
800 address generate_d2i_fixup() {
801 StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
802 Address inout(rsp, 6 * wordSize); // return address + 5 saves
804 address start = __ pc();
806 Label L;
808 __ push(rax);
809 __ push(c_rarg3);
810 __ push(c_rarg2);
811 __ push(c_rarg1);
812 __ push(c_rarg0);
814 __ movl(rax, 0x7ff00000);
815 __ movq(c_rarg2, inout);
816 __ movl(c_rarg3, c_rarg2);
817 __ mov(c_rarg1, c_rarg2);
818 __ mov(c_rarg0, c_rarg2);
819 __ negl(c_rarg3);
820 __ shrptr(c_rarg1, 0x20);
821 __ orl(c_rarg3, c_rarg2);
822 __ andl(c_rarg1, 0x7fffffff);
823 __ xorl(c_rarg2, c_rarg2);
824 __ shrl(c_rarg3, 0x1f);
825 __ orl(c_rarg1, c_rarg3);
826 __ cmpl(rax, c_rarg1);
827 __ jcc(Assembler::negative, L); // NaN -> 0
828 __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
829 __ movl(c_rarg2, 0x80000000);
830 __ movl(rax, 0x7fffffff);
831 __ cmov(Assembler::positive, c_rarg2, rax);
833 __ bind(L);
834 __ movptr(inout, c_rarg2);
836 __ pop(c_rarg0);
837 __ pop(c_rarg1);
838 __ pop(c_rarg2);
839 __ pop(c_rarg3);
840 __ pop(rax);
842 __ ret(0);
844 return start;
845 }
847 address generate_d2l_fixup() {
848 StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
849 Address inout(rsp, 6 * wordSize); // return address + 5 saves
851 address start = __ pc();
853 Label L;
855 __ push(rax);
856 __ push(c_rarg3);
857 __ push(c_rarg2);
858 __ push(c_rarg1);
859 __ push(c_rarg0);
861 __ movl(rax, 0x7ff00000);
862 __ movq(c_rarg2, inout);
863 __ movl(c_rarg3, c_rarg2);
864 __ mov(c_rarg1, c_rarg2);
865 __ mov(c_rarg0, c_rarg2);
866 __ negl(c_rarg3);
867 __ shrptr(c_rarg1, 0x20);
868 __ orl(c_rarg3, c_rarg2);
869 __ andl(c_rarg1, 0x7fffffff);
870 __ xorl(c_rarg2, c_rarg2);
871 __ shrl(c_rarg3, 0x1f);
872 __ orl(c_rarg1, c_rarg3);
873 __ cmpl(rax, c_rarg1);
874 __ jcc(Assembler::negative, L); // NaN -> 0
875 __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
876 __ mov64(c_rarg2, 0x8000000000000000);
877 __ mov64(rax, 0x7fffffffffffffff);
878 __ cmovq(Assembler::positive, c_rarg2, rax);
880 __ bind(L);
881 __ movq(inout, c_rarg2);
883 __ pop(c_rarg0);
884 __ pop(c_rarg1);
885 __ pop(c_rarg2);
886 __ pop(c_rarg3);
887 __ pop(rax);
889 __ ret(0);
891 return start;
892 }
894 address generate_fp_mask(const char *stub_name, int64_t mask) {
895 __ align(CodeEntryAlignment);
896 StubCodeMark mark(this, "StubRoutines", stub_name);
897 address start = __ pc();
899 __ emit_data64( mask, relocInfo::none );
900 __ emit_data64( mask, relocInfo::none );
902 return start;
903 }
905 // The following routine generates a subroutine to throw an
906 // asynchronous UnknownError when an unsafe access gets a fault that
907 // could not be reasonably prevented by the programmer. (Example:
908 // SIGBUS/OBJERR.)
909 address generate_handler_for_unsafe_access() {
910 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
911 address start = __ pc();
913 __ push(0); // hole for return address-to-be
914 __ pusha(); // push registers
915 Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
917 __ subptr(rsp, frame::arg_reg_save_area_bytes);
918 BLOCK_COMMENT("call handle_unsafe_access");
919 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
920 __ addptr(rsp, frame::arg_reg_save_area_bytes);
922 __ movptr(next_pc, rax); // stuff next address
923 __ popa();
924 __ ret(0); // jump to next address
926 return start;
927 }
929 // Non-destructive plausibility checks for oops
930 //
931 // Arguments:
932 // all args on stack!
933 //
934 // Stack after saving c_rarg3:
935 // [tos + 0]: saved c_rarg3
936 // [tos + 1]: saved c_rarg2
937 // [tos + 2]: saved r12 (several TemplateTable methods use it)
938 // [tos + 3]: saved flags
939 // [tos + 4]: return address
940 // * [tos + 5]: error message (char*)
941 // * [tos + 6]: object to verify (oop)
942 // * [tos + 7]: saved rax - saved by caller and bashed
943 // * [tos + 8]: saved r10 (rscratch1) - saved by caller
944 // * = popped on exit
945 address generate_verify_oop() {
946 StubCodeMark mark(this, "StubRoutines", "verify_oop");
947 address start = __ pc();
949 Label exit, error;
951 __ pushf();
952 __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));
954 __ push(r12);
956 // save c_rarg2 and c_rarg3
957 __ push(c_rarg2);
958 __ push(c_rarg3);
960 enum {
961 // After previous pushes.
962 oop_to_verify = 6 * wordSize,
963 saved_rax = 7 * wordSize,
964 saved_r10 = 8 * wordSize,
966 // Before the call to MacroAssembler::debug(), see below.
967 return_addr = 16 * wordSize,
968 error_msg = 17 * wordSize
969 };
971 // get object
972 __ movptr(rax, Address(rsp, oop_to_verify));
974 // make sure object is 'reasonable'
975 __ testptr(rax, rax);
976 __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
977 // Check if the oop is in the right area of memory
978 __ movptr(c_rarg2, rax);
979 __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
980 __ andptr(c_rarg2, c_rarg3);
981 __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
982 __ cmpptr(c_rarg2, c_rarg3);
983 __ jcc(Assembler::notZero, error);
985 // set r12 to heapbase for load_klass()
986 __ reinit_heapbase();
988 // make sure klass is 'reasonable'
989 __ load_klass(rax, rax); // get klass
990 __ testptr(rax, rax);
991 __ jcc(Assembler::zero, error); // if klass is NULL it is broken
992 // Check if the klass is in the right area of memory
993 __ mov(c_rarg2, rax);
994 __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask());
995 __ andptr(c_rarg2, c_rarg3);
996 __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits());
997 __ cmpptr(c_rarg2, c_rarg3);
998 __ jcc(Assembler::notZero, error);
1000 // make sure klass' klass is 'reasonable'
1001 __ load_klass(rax, rax);
1002 __ testptr(rax, rax);
1003 __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
1004 // Check if the klass' klass is in the right area of memory
1005 __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask());
1006 __ andptr(rax, c_rarg3);
1007 __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits());
1008 __ cmpptr(rax, c_rarg3);
1009 __ jcc(Assembler::notZero, error);
1011 // return if everything seems ok
1012 __ bind(exit);
1013 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
1014 __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
1015 __ pop(c_rarg3); // restore c_rarg3
1016 __ pop(c_rarg2); // restore c_rarg2
1017 __ pop(r12); // restore r12
1018 __ popf(); // restore flags
1019 __ ret(4 * wordSize); // pop caller saved stuff
1021 // handle errors
1022 __ bind(error);
1023 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
1024 __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
1025 __ pop(c_rarg3); // get saved c_rarg3 back
1026 __ pop(c_rarg2); // get saved c_rarg2 back
1027 __ pop(r12); // get saved r12 back
1028 __ popf(); // get saved flags off stack --
1029 // will be ignored
1031 __ pusha(); // push registers
1032 // (rip is
1033 // already pushed)
1034 // debug(char* msg, int64_t pc, int64_t regs[])
1035 // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
1036 // pushed all the registers, so now the stack looks like:
1037 // [tos + 0] 16 saved registers
1038 // [tos + 16] return address
1039 // * [tos + 17] error message (char*)
1040 // * [tos + 18] object to verify (oop)
1041 // * [tos + 19] saved rax - saved by caller and bashed
1042 // * [tos + 20] saved r10 (rscratch1) - saved by caller
1043 // * = popped on exit
1045 __ movptr(c_rarg0, Address(rsp, error_msg)); // pass address of error message
1046 __ movptr(c_rarg1, Address(rsp, return_addr)); // pass return address
1047 __ movq(c_rarg2, rsp); // pass address of regs on stack
1048 __ mov(r12, rsp); // remember rsp
1049 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1050 __ andptr(rsp, -16); // align stack as required by ABI
1051 BLOCK_COMMENT("call MacroAssembler::debug");
1052 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
1053 __ mov(rsp, r12); // restore rsp
1054 __ popa(); // pop registers (includes r12)
1055 __ ret(4 * wordSize); // pop caller saved stuff
1057 return start;
1058 }
1060 static address disjoint_byte_copy_entry;
1061 static address disjoint_short_copy_entry;
1062 static address disjoint_int_copy_entry;
1063 static address disjoint_long_copy_entry;
1064 static address disjoint_oop_copy_entry;
1066 static address byte_copy_entry;
1067 static address short_copy_entry;
1068 static address int_copy_entry;
1069 static address long_copy_entry;
1070 static address oop_copy_entry;
1072 static address checkcast_copy_entry;
1074 //
1075 // Verify that a register contains a clean 32-bit positive value
1076 // (high 32 bits are 0) so it can be used in 64-bit shifts.
1077 //
1078 // Input:
1079 // Rint - 32-bit value
1080 // Rtmp - scratch
1081 //
1082 void assert_clean_int(Register Rint, Register Rtmp) {
1083 #ifdef ASSERT
1084 Label L;
1085 assert_different_registers(Rtmp, Rint);
1086 __ movslq(Rtmp, Rint);
1087 __ cmpq(Rtmp, Rint);
1088 __ jcc(Assembler::equal, L);
1089 __ stop("high 32-bits of int value are not 0");
1090 __ bind(L);
1091 #endif
1092 }
1094 // Generate overlap test for array copy stubs
1095 //
1096 // Input:
1097 // c_rarg0 - from
1098 // c_rarg1 - to
1099 // c_rarg2 - element count
1100 //
1101 // Output:
1102 // rax - &from[element count], i.e. one element past the last source element
1103 //
1104 void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
1105 assert(no_overlap_target != NULL, "must be generated");
1106 array_overlap_test(no_overlap_target, NULL, sf);
1107 }
1108 void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
1109 array_overlap_test(NULL, &L_no_overlap, sf);
1110 }
1111 void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
1112 const Register from = c_rarg0;
1113 const Register to = c_rarg1;
1114 const Register count = c_rarg2;
1115 const Register end_from = rax;
1117 __ cmpptr(to, from);
1118 __ lea(end_from, Address(from, count, sf, 0));
1119 if (NOLp == NULL) {
1120 ExternalAddress no_overlap(no_overlap_target);
1121 __ jump_cc(Assembler::belowEqual, no_overlap);
1122 __ cmpptr(to, end_from);
1123 __ jump_cc(Assembler::aboveEqual, no_overlap);
1124 } else {
1125 __ jcc(Assembler::belowEqual, (*NOLp));
1126 __ cmpptr(to, end_from);
1127 __ jcc(Assembler::aboveEqual, (*NOLp));
1128 }
1129 }
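  // Illustrative sketch only (not generated code): the test above takes the
  // no-overlap (forward copy) path exactly when copying front-to-back cannot
  // clobber source elements that have not been read yet, roughly:
  static bool forward_copy_is_safe_sketch(const char* from, const char* to, size_t byte_count) {
    return to <= from || to >= from + byte_count;
  }
  // Otherwise control falls through into the conjoint (backward) copy code.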
1131 // Shuffle first three arg regs on Windows into Linux/Solaris locations.
1132 //
1133 // Outputs:
1134 // rdi - rcx
1135 // rsi - rdx
1136 // rdx - r8
1137 // rcx - r9
1138 //
1139 // On Windows, r9 and r10 are used to save rdi and rsi, both of which are
1140 // non-volatile there; r9 and r10 should therefore not be used by the caller.
1141 //
1142 void setup_arg_regs(int nargs = 3) {
1143 const Register saved_rdi = r9;
1144 const Register saved_rsi = r10;
1145 assert(nargs == 3 || nargs == 4, "else fix");
1146 #ifdef _WIN64
1147 assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
1148 "unexpected argument registers");
1149 if (nargs >= 4)
1150 __ mov(rax, r9); // r9 is also saved_rdi
1151 __ movptr(saved_rdi, rdi);
1152 __ movptr(saved_rsi, rsi);
1153 __ mov(rdi, rcx); // c_rarg0
1154 __ mov(rsi, rdx); // c_rarg1
1155 __ mov(rdx, r8); // c_rarg2
1156 if (nargs >= 4)
1157 __ mov(rcx, rax); // c_rarg3 (via rax)
1158 #else
1159 assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
1160 "unexpected argument registers");
1161 #endif
1162 }
1164 void restore_arg_regs() {
1165 const Register saved_rdi = r9;
1166 const Register saved_rsi = r10;
1167 #ifdef _WIN64
1168 __ movptr(rdi, saved_rdi);
1169 __ movptr(rsi, saved_rsi);
1170 #endif
1171 }
1173 // Generate code for an array write pre barrier
1174 //
1175 // addr - starting address
1176 // count - element count
1177 //
1178 // Destroy no registers!
1179 //
1180 void gen_write_ref_array_pre_barrier(Register addr, Register count) {
1181 BarrierSet* bs = Universe::heap()->barrier_set();
1182 switch (bs->kind()) {
1183 case BarrierSet::G1SATBCT:
1184 case BarrierSet::G1SATBCTLogging:
1185 {
1186 __ pusha(); // push registers
1187 if (count == c_rarg0) {
1188 if (addr == c_rarg1) {
1189 // exactly backwards!!
1190 __ xchgptr(c_rarg1, c_rarg0);
1191 } else {
1192 __ movptr(c_rarg1, count);
1193 __ movptr(c_rarg0, addr);
1194 }
1196 } else {
1197 __ movptr(c_rarg0, addr);
1198 __ movptr(c_rarg1, count);
1199 }
1200 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
1201 __ popa();
1202 }
1203 break;
1204 case BarrierSet::CardTableModRef:
1205 case BarrierSet::CardTableExtension:
1206 case BarrierSet::ModRef:
1207 break;
1208 default:
1209 ShouldNotReachHere();
1211 }
1212 }
1214 //
1215 // Generate code for an array write post barrier
1216 //
1217 // Input:
1218 // start - register containing starting address of destination array
1219 // end - register containing ending address of destination array
1220 // scratch - scratch register
1221 //
1222 // The input registers are overwritten.
1223 // The ending address is inclusive.
1224 void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch) {
1225 assert_different_registers(start, end, scratch);
1226 BarrierSet* bs = Universe::heap()->barrier_set();
1227 switch (bs->kind()) {
1228 case BarrierSet::G1SATBCT:
1229 case BarrierSet::G1SATBCTLogging:
1231 {
1232 __ pusha(); // push registers (overkill)
1233 // must compute element count unless barrier set interface is changed (other platforms supply count)
1234 assert_different_registers(start, end, scratch);
1235 __ lea(scratch, Address(end, BytesPerHeapOop));
1236 __ subptr(scratch, start); // subtract start to get #bytes
1237 __ shrptr(scratch, LogBytesPerHeapOop); // convert to element count
1238 __ mov(c_rarg0, start);
1239 __ mov(c_rarg1, scratch);
1240 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
1241 __ popa();
1242 }
1243 break;
1244 case BarrierSet::CardTableModRef:
1245 case BarrierSet::CardTableExtension:
1246 {
1247 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1248 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1250 Label L_loop;
1252 __ shrptr(start, CardTableModRefBS::card_shift);
1253 __ addptr(end, BytesPerHeapOop);
1254 __ shrptr(end, CardTableModRefBS::card_shift);
1255 __ subptr(end, start); // number of bytes to copy
1257 intptr_t disp = (intptr_t) ct->byte_map_base;
1258 if (__ is_simm32(disp)) {
1259 Address cardtable(noreg, noreg, Address::no_scale, disp);
1260 __ lea(scratch, cardtable);
1261 } else {
1262 ExternalAddress cardtable((address)disp);
1263 __ lea(scratch, cardtable);
1264 }
1266 const Register count = end; // 'end' register contains bytes count now
1267 __ addptr(start, scratch);
1268 __ BIND(L_loop);
1269 __ movb(Address(start, count, Address::times_1), 0);
1270 __ decrement(count);
1271 __ jcc(Assembler::greaterEqual, L_loop);
1272 }
1273 break;
1274 default:
1275 ShouldNotReachHere();
1277 }
1278 }
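  // Illustrative sketch only (not generated code): for the card-table cases the
  // loop above dirties every card spanned by the inclusive range [start, end].
  // In plain C++ this is roughly the following, assuming 0 is the dirty card
  // value as in CardTableModRefBS::dirty_card_val():
  static void dirty_card_range_sketch(jbyte* byte_map_base, uintptr_t start, uintptr_t end) {
    for (uintptr_t card = start >> CardTableModRefBS::card_shift;
         card <= (end >> CardTableModRefBS::card_shift);
         card++) {
      byte_map_base[card] = 0;        // mark the card covering these heap words dirty
    }
  }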
1281 // Copy big chunks forward
1282 //
1283 // Inputs:
1284 // end_from - source array's end address
1285 // end_to - destination array's end address
1286 // qword_count - 64-bit element count, negative
1287 // to - scratch
1288 // L_copy_32_bytes - entry label
1289 // L_copy_8_bytes - exit label
1290 //
1291 void copy_32_bytes_forward(Register end_from, Register end_to,
1292 Register qword_count, Register to,
1293 Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
1294 DEBUG_ONLY(__ stop("enter at entry label, not here"));
1295 Label L_loop;
1296 __ align(OptoLoopAlignment);
1297 __ BIND(L_loop);
1298 if (UseUnalignedLoadStores) {
1299 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
1300 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
1301 __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
1302 __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
1304 } else {
1305 __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
1306 __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
1307 __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
1308 __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
1309 __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
1310 __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
1311 __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
1312 __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);
1313 }
1314 __ BIND(L_copy_32_bytes);
1315 __ addptr(qword_count, 4);
1316 __ jcc(Assembler::lessEqual, L_loop);
1317 __ subptr(qword_count, 4);
1318 __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
1319 }
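  // Illustrative sketch only (not generated code): ignoring the SSE2 (movdqu) path
  // and the exact unrolling offsets, the forward copy works off inclusive end
  // pointers and a negative qword count that climbs towards zero:
  static void copy_qwords_forward_sketch(jlong* end_from, jlong* end_to, intptr_t qword_count) {
    // end_from/end_to point at the last qword; qword_count == -(number of qwords)
    while (qword_count + 4 <= 0) {               // main loop: 32 bytes per iteration
      for (int k = 1; k <= 4; k++) {
        end_to[qword_count + k] = end_from[qword_count + k];
      }
      qword_count += 4;
    }
    while (qword_count != 0) {                   // copy trailing qwords one at a time
      end_to[qword_count + 1] = end_from[qword_count + 1];
      qword_count++;
    }
  }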
1322 // Copy big chunks backward
1323 //
1324 // Inputs:
1325 // from - source array's address
1326 // dest - destination array's address
1327 // qword_count - 64-bit element count
1328 // to - scratch
1329 // L_copy_32_bytes - entry label
1330 // L_copy_8_bytes - exit label
1331 //
1332 void copy_32_bytes_backward(Register from, Register dest,
1333 Register qword_count, Register to,
1334 Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
1335 DEBUG_ONLY(__ stop("enter at entry label, not here"));
1336 Label L_loop;
1337 __ align(OptoLoopAlignment);
1338 __ BIND(L_loop);
1339 if (UseUnalignedLoadStores) {
1340 __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
1341 __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
1342 __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
1343 __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
1345 } else {
1346 __ movq(to, Address(from, qword_count, Address::times_8, 24));
1347 __ movq(Address(dest, qword_count, Address::times_8, 24), to);
1348 __ movq(to, Address(from, qword_count, Address::times_8, 16));
1349 __ movq(Address(dest, qword_count, Address::times_8, 16), to);
1350 __ movq(to, Address(from, qword_count, Address::times_8, 8));
1351 __ movq(Address(dest, qword_count, Address::times_8, 8), to);
1352 __ movq(to, Address(from, qword_count, Address::times_8, 0));
1353 __ movq(Address(dest, qword_count, Address::times_8, 0), to);
1354 }
1355 __ BIND(L_copy_32_bytes);
1356 __ subptr(qword_count, 4);
1357 __ jcc(Assembler::greaterEqual, L_loop);
1358 __ addptr(qword_count, 4);
1359 __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
1360 }
1363 // Arguments:
1364 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1365 // ignored by this implementation
1366 // name - stub name string
1367 //
1368 // Inputs:
1369 // c_rarg0 - source array address
1370 // c_rarg1 - destination array address
1371 // c_rarg2 - element count, treated as ssize_t, can be zero
1372 //
1373 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
1374 // we let the hardware handle it. The one to eight bytes within words,
1375 // dwords or qwords that span cache line boundaries will still be loaded
1376 // and stored atomically.
1377 //
1378 // Side Effects:
1379 // disjoint_byte_copy_entry is set to the no-overlap entry point
1380 // used by generate_conjoint_byte_copy().
1381 //
1382 address generate_disjoint_byte_copy(bool aligned, const char *name) {
1383 __ align(CodeEntryAlignment);
1384 StubCodeMark mark(this, "StubRoutines", name);
1385 address start = __ pc();
1387 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
1388 Label L_copy_byte, L_exit;
1389 const Register from = rdi; // source array address
1390 const Register to = rsi; // destination array address
1391 const Register count = rdx; // elements count
1392 const Register byte_count = rcx;
1393 const Register qword_count = count;
1394 const Register end_from = from; // source array end address
1395 const Register end_to = to; // destination array end address
1396 // End pointers are inclusive, and if count is not zero they point
1397 // to the last unit copied: end_to[0] := end_from[0]
1399 __ enter(); // required for proper stackwalking of RuntimeStub frame
1400 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1402 disjoint_byte_copy_entry = __ pc();
1403 BLOCK_COMMENT("Entry:");
1404 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1406 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1407 // r9 and r10 may be used to save non-volatile registers
1409 // 'from', 'to' and 'count' are now valid
1410 __ movptr(byte_count, count);
1411 __ shrptr(count, 3); // count => qword_count
1413 // Copy from low to high addresses. Use 'to' as scratch.
1414 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1415 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1416 __ negptr(qword_count); // make the count negative
1417 __ jmp(L_copy_32_bytes);
1419 // Copy trailing qwords
1420 __ BIND(L_copy_8_bytes);
1421 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1422 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1423 __ increment(qword_count);
1424 __ jcc(Assembler::notZero, L_copy_8_bytes);
1426 // Check for and copy trailing dword
1427 __ BIND(L_copy_4_bytes);
1428 __ testl(byte_count, 4);
1429 __ jccb(Assembler::zero, L_copy_2_bytes);
1430 __ movl(rax, Address(end_from, 8));
1431 __ movl(Address(end_to, 8), rax);
1433 __ addptr(end_from, 4);
1434 __ addptr(end_to, 4);
1436 // Check for and copy trailing word
1437 __ BIND(L_copy_2_bytes);
1438 __ testl(byte_count, 2);
1439 __ jccb(Assembler::zero, L_copy_byte);
1440 __ movw(rax, Address(end_from, 8));
1441 __ movw(Address(end_to, 8), rax);
1443 __ addptr(end_from, 2);
1444 __ addptr(end_to, 2);
1446 // Check for and copy trailing byte
1447 __ BIND(L_copy_byte);
1448 __ testl(byte_count, 1);
1449 __ jccb(Assembler::zero, L_exit);
1450 __ movb(rax, Address(end_from, 8));
1451 __ movb(Address(end_to, 8), rax);
1453 __ BIND(L_exit);
1454 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
1455 restore_arg_regs();
1456 __ xorptr(rax, rax); // return 0
1457 __ leave(); // required for proper stackwalking of RuntimeStub frame
1458 __ ret(0);
1460 // Copy in 32-bytes chunks
1461 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1462 __ jmp(L_copy_4_bytes);
1464 return start;
1465 }
1467 // Arguments:
1468 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1469 // ignored by this implementation
1470 // name - stub name string
1471 //
1472 // Inputs:
1473 // c_rarg0 - source array address
1474 // c_rarg1 - destination array address
1475 // c_rarg2 - element count, treated as ssize_t, can be zero
1476 //
1477 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
1478 // we let the hardware handle it. The one to eight bytes within words,
1479 // dwords or qwords that span cache line boundaries will still be loaded
1480 // and stored atomically.
1481 //
1482 address generate_conjoint_byte_copy(bool aligned, const char *name) {
1483 __ align(CodeEntryAlignment);
1484 StubCodeMark mark(this, "StubRoutines", name);
1485 address start = __ pc();
1487 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
1488 const Register from = rdi; // source array address
1489 const Register to = rsi; // destination array address
1490 const Register count = rdx; // elements count
1491 const Register byte_count = rcx;
1492 const Register qword_count = count;
1494 __ enter(); // required for proper stackwalking of RuntimeStub frame
1495 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1497 byte_copy_entry = __ pc();
1498 BLOCK_COMMENT("Entry:");
1499 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1501 array_overlap_test(disjoint_byte_copy_entry, Address::times_1);
1502 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1503 // r9 and r10 may be used to save non-volatile registers
1505 // 'from', 'to' and 'count' are now valid
1506 __ movptr(byte_count, count);
1507 __ shrptr(count, 3); // count => qword_count
1509 // Copy from high to low addresses.
1511 // Check for and copy trailing byte
1512 __ testl(byte_count, 1);
1513 __ jcc(Assembler::zero, L_copy_2_bytes);
1514 __ movb(rax, Address(from, byte_count, Address::times_1, -1));
1515 __ movb(Address(to, byte_count, Address::times_1, -1), rax);
1516 __ decrement(byte_count); // Adjust for possible trailing word
1518 // Check for and copy trailing word
1519 __ BIND(L_copy_2_bytes);
1520 __ testl(byte_count, 2);
1521 __ jcc(Assembler::zero, L_copy_4_bytes);
1522 __ movw(rax, Address(from, byte_count, Address::times_1, -2));
1523 __ movw(Address(to, byte_count, Address::times_1, -2), rax);
1525 // Check for and copy trailing dword
1526 __ BIND(L_copy_4_bytes);
1527 __ testl(byte_count, 4);
1528 __ jcc(Assembler::zero, L_copy_32_bytes);
1529 __ movl(rax, Address(from, qword_count, Address::times_8));
1530 __ movl(Address(to, qword_count, Address::times_8), rax);
1531 __ jmp(L_copy_32_bytes);
1533 // Copy trailing qwords
1534 __ BIND(L_copy_8_bytes);
1535 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1536 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1537 __ decrement(qword_count);
1538 __ jcc(Assembler::notZero, L_copy_8_bytes);
1540 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
1541 restore_arg_regs();
1542 __ xorptr(rax, rax); // return 0
1543 __ leave(); // required for proper stackwalking of RuntimeStub frame
1544 __ ret(0);
1546 // Copy in 32-bytes chunks
1547 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1549 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
1550 restore_arg_regs();
1551 __ xorptr(rax, rax); // return 0
1552 __ leave(); // required for proper stackwalking of RuntimeStub frame
1553 __ ret(0);
1555 return start;
1556 }
1558 // Arguments:
1559 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1560 // ignored by this implementation
1561 // name - stub name string
1562 //
1563 // Inputs:
1564 // c_rarg0 - source array address
1565 // c_rarg1 - destination array address
1566 // c_rarg2 - element count, treated as ssize_t, can be zero
1567 //
1568 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
1569 // let the hardware handle it. The two or four words within dwords
1570 // or qwords that span cache line boundaries will still be loaded
1571 // and stored atomically.
1572 //
1573 // Side Effects:
1574 // disjoint_short_copy_entry is set to the no-overlap entry point
1575 // used by generate_conjoint_short_copy().
1576 //
1577 address generate_disjoint_short_copy(bool aligned, const char *name) {
1578 __ align(CodeEntryAlignment);
1579 StubCodeMark mark(this, "StubRoutines", name);
1580 address start = __ pc();
1582 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
1583 const Register from = rdi; // source array address
1584 const Register to = rsi; // destination array address
1585 const Register count = rdx; // elements count
1586 const Register word_count = rcx;
1587 const Register qword_count = count;
1588 const Register end_from = from; // source array end address
1589 const Register end_to = to; // destination array end address
1590 // End pointers are inclusive, and if count is not zero they point
1591 // to the last unit copied: end_to[0] := end_from[0]
1593 __ enter(); // required for proper stackwalking of RuntimeStub frame
1594 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1596 disjoint_short_copy_entry = __ pc();
1597 BLOCK_COMMENT("Entry:");
1598 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1600 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1601 // r9 and r10 may be used to save non-volatile registers
1603 // 'from', 'to' and 'count' are now valid
1604 __ movptr(word_count, count);
1605 __ shrptr(count, 2); // count => qword_count
1607 // Copy from low to high addresses. Use 'to' as scratch.
1608 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1609 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1610 __ negptr(qword_count);
1611 __ jmp(L_copy_32_bytes);
1613 // Copy trailing qwords
1614 __ BIND(L_copy_8_bytes);
1615 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1616 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1617 __ increment(qword_count);
1618 __ jcc(Assembler::notZero, L_copy_8_bytes);
1620 // Original 'dest' is trashed, so we can't use it as a
1621 // base register for a possible trailing word copy
1623 // Check for and copy trailing dword
1624 __ BIND(L_copy_4_bytes);
1625 __ testl(word_count, 2);
1626 __ jccb(Assembler::zero, L_copy_2_bytes);
1627 __ movl(rax, Address(end_from, 8));
1628 __ movl(Address(end_to, 8), rax);
1630 __ addptr(end_from, 4);
1631 __ addptr(end_to, 4);
1633 // Check for and copy trailing word
1634 __ BIND(L_copy_2_bytes);
1635 __ testl(word_count, 1);
1636 __ jccb(Assembler::zero, L_exit);
1637 __ movw(rax, Address(end_from, 8));
1638 __ movw(Address(end_to, 8), rax);
1640 __ BIND(L_exit);
1641 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1642 restore_arg_regs();
1643 __ xorptr(rax, rax); // return 0
1644 __ leave(); // required for proper stackwalking of RuntimeStub frame
1645 __ ret(0);
1647 // Copy in 32-bytes chunks
1648 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1649 __ jmp(L_copy_4_bytes);
1651 return start;
1652 }
1654 address generate_fill(BasicType t, bool aligned, const char *name) {
1655 __ align(CodeEntryAlignment);
1656 StubCodeMark mark(this, "StubRoutines", name);
1657 address start = __ pc();
1659 BLOCK_COMMENT("Entry:");
1661 const Register to = c_rarg0; // source array address
1662 const Register value = c_rarg1; // value
1663 const Register count = c_rarg2; // elements count
1665 __ enter(); // required for proper stackwalking of RuntimeStub frame
1667 __ generate_fill(t, aligned, to, value, count, rax, xmm0);
1669 __ leave(); // required for proper stackwalking of RuntimeStub frame
1670 __ ret(0);
1671 return start;
1672 }
1674 // Arguments:
1675 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1676 // ignored by this implementation
1677 // name - stub name string
1678 //
1679 // Inputs:
1680 // c_rarg0 - source array address
1681 // c_rarg1 - destination array address
1682 // c_rarg2 - element count, treated as ssize_t, can be zero
1683 //
1684 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
1685 // let the hardware handle it. The two or four words within dwords
1686 // or qwords that span cache line boundaries will still be loaded
1687 // and stored atomically.
1688 //
1689 address generate_conjoint_short_copy(bool aligned, const char *name) {
1690 __ align(CodeEntryAlignment);
1691 StubCodeMark mark(this, "StubRoutines", name);
1692 address start = __ pc();
1694 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes;
1695 const Register from = rdi; // source array address
1696 const Register to = rsi; // destination array address
1697 const Register count = rdx; // elements count
1698 const Register word_count = rcx;
1699 const Register qword_count = count;
1701 __ enter(); // required for proper stackwalking of RuntimeStub frame
1702 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1704 short_copy_entry = __ pc();
1705 BLOCK_COMMENT("Entry:");
1706 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1708 array_overlap_test(disjoint_short_copy_entry, Address::times_2);
1709 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1710 // r9 and r10 may be used to save non-volatile registers
1712 // 'from', 'to' and 'count' are now valid
1713 __ movptr(word_count, count);
1714 __ shrptr(count, 2); // count => qword_count
1716 // Copy from high to low addresses. Use 'to' as scratch.
1718 // Check for and copy trailing word
1719 __ testl(word_count, 1);
1720 __ jccb(Assembler::zero, L_copy_4_bytes);
1721 __ movw(rax, Address(from, word_count, Address::times_2, -2));
1722 __ movw(Address(to, word_count, Address::times_2, -2), rax);
1724 // Check for and copy trailing dword
1725 __ BIND(L_copy_4_bytes);
1726 __ testl(word_count, 2);
1727 __ jcc(Assembler::zero, L_copy_32_bytes);
1728 __ movl(rax, Address(from, qword_count, Address::times_8));
1729 __ movl(Address(to, qword_count, Address::times_8), rax);
1730 __ jmp(L_copy_32_bytes);
1732 // Copy trailing qwords
1733 __ BIND(L_copy_8_bytes);
1734 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1735 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1736 __ decrement(qword_count);
1737 __ jcc(Assembler::notZero, L_copy_8_bytes);
1739 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1740 restore_arg_regs();
1741 __ xorptr(rax, rax); // return 0
1742 __ leave(); // required for proper stackwalking of RuntimeStub frame
1743 __ ret(0);
1745 // Copy in 32-bytes chunks
1746 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1748 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1749 restore_arg_regs();
1750 __ xorptr(rax, rax); // return 0
1751 __ leave(); // required for proper stackwalking of RuntimeStub frame
1752 __ ret(0);
1754 return start;
1755 }
1757 // Arguments:
1758 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1759 // ignored by this implementation
1760 // is_oop - true => oop array, so generate store check code
1761 // name - stub name string
1762 //
1763 // Inputs:
1764 // c_rarg0 - source array address
1765 // c_rarg1 - destination array address
1766 // c_rarg2 - element count, treated as ssize_t, can be zero
1767 //
1768 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1769 // the hardware handle it. The two dwords within qwords that span
1770 // cache line boundaries will still be loaded and stored atomically.
1771 //
1772 // Side Effects:
1773 // disjoint_int_copy_entry is set to the no-overlap entry point
1774 // used by generate_conjoint_int_oop_copy().
1775 //
1776 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) {
1777 __ align(CodeEntryAlignment);
1778 StubCodeMark mark(this, "StubRoutines", name);
1779 address start = __ pc();
1781 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
1782 const Register from = rdi; // source array address
1783 const Register to = rsi; // destination array address
1784 const Register count = rdx; // elements count
1785 const Register dword_count = rcx;
1786 const Register qword_count = count;
1787 const Register end_from = from; // source array end address
1788 const Register end_to = to; // destination array end address
1789 const Register saved_to = r11; // saved destination array address
1790 // End pointers are inclusive, and if count is not zero they point
1791 // to the last unit copied: end_to[0] := end_from[0]
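// The forward copy below uses the usual negative-index trick: with, say,
// qword_count == 3, end_from/end_to are biased to the last qword (from + 16)
// and qword_count is negated to -3, so Address(end_from, qword_count, times_8, 8)
// walks from+0, from+8, from+16 as qword_count is incremented back toward zero.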
1793 __ enter(); // required for proper stackwalking of RuntimeStub frame
1794 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1796 (is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry) = __ pc();
1798 if (is_oop) {
1799 // no registers are destroyed by this call
1800 gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
1801 }
1803 BLOCK_COMMENT("Entry:");
1804 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1806 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1807 // r9 and r10 may be used to save non-volatile registers
1809 if (is_oop) {
1810 __ movq(saved_to, to);
1811 }
1813 // 'from', 'to' and 'count' are now valid
1814 __ movptr(dword_count, count);
1815 __ shrptr(count, 1); // count => qword_count
1817 // Copy from low to high addresses. Use 'to' as scratch.
1818 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1819 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1820 __ negptr(qword_count);
1821 __ jmp(L_copy_32_bytes);
1823 // Copy trailing qwords
1824 __ BIND(L_copy_8_bytes);
1825 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1826 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1827 __ increment(qword_count);
1828 __ jcc(Assembler::notZero, L_copy_8_bytes);
1830 // Check for and copy trailing dword
1831 __ BIND(L_copy_4_bytes);
1832 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
1833 __ jccb(Assembler::zero, L_exit);
1834 __ movl(rax, Address(end_from, 8));
1835 __ movl(Address(end_to, 8), rax);
1837 __ BIND(L_exit);
1838 if (is_oop) {
1839 __ leaq(end_to, Address(saved_to, dword_count, Address::times_4, -4));
1840 gen_write_ref_array_post_barrier(saved_to, end_to, rax);
1841 }
1842 inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1843 restore_arg_regs();
1844 __ xorptr(rax, rax); // return 0
1845 __ leave(); // required for proper stackwalking of RuntimeStub frame
1846 __ ret(0);
1848 // Copy 32-byte chunks
1849 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1850 __ jmp(L_copy_4_bytes);
1852 return start;
1853 }
1855 // Arguments:
1856 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1857 // ignored
1858 // is_oop - true => oop array, so generate store check code
1859 // name - stub name string
1860 //
1861 // Inputs:
1862 // c_rarg0 - source array address
1863 // c_rarg1 - destination array address
1864 // c_rarg2 - element count, treated as ssize_t, can be zero
1865 //
1866 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1867 // the hardware handle it. The two dwords within qwords that span
1868 // cache line boundaries will still be loaded and stored atomically.
1869 //
1870 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) {
1871 __ align(CodeEntryAlignment);
1872 StubCodeMark mark(this, "StubRoutines", name);
1873 address start = __ pc();
1875 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
1876 const Register from = rdi; // source array address
1877 const Register to = rsi; // destination array address
1878 const Register count = rdx; // elements count
1879 const Register dword_count = rcx;
1880 const Register qword_count = count;
1882 __ enter(); // required for proper stackwalking of RuntimeStub frame
1883 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1885 if (is_oop) {
1886 // no registers are destroyed by this call
1887 gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
1888 }
1890 (is_oop ? oop_copy_entry : int_copy_entry) = __ pc();
1891 BLOCK_COMMENT("Entry:");
1892 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1894 array_overlap_test(is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry,
1895 Address::times_4);
1896 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1897 // r9 and r10 may be used to save non-volatile registers
1899 assert_clean_int(count, rax); // Make sure 'count' is clean int.
1900 // 'from', 'to' and 'count' are now valid
1901 __ movptr(dword_count, count);
1902 __ shrptr(count, 1); // count => qword_count
1904 // Copy from high to low addresses. Use 'to' as scratch.
1906 // Check for and copy trailing dword
1907 __ testl(dword_count, 1);
1908 __ jcc(Assembler::zero, L_copy_32_bytes);
1909 __ movl(rax, Address(from, dword_count, Address::times_4, -4));
1910 __ movl(Address(to, dword_count, Address::times_4, -4), rax);
1911 __ jmp(L_copy_32_bytes);
1913 // Copy trailing qwords
1914 __ BIND(L_copy_8_bytes);
1915 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1916 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1917 __ decrement(qword_count);
1918 __ jcc(Assembler::notZero, L_copy_8_bytes);
1920 inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1921 if (is_oop) {
1922 __ jmp(L_exit);
1923 }
1924 restore_arg_regs();
1925 __ xorptr(rax, rax); // return 0
1926 __ leave(); // required for proper stackwalking of RuntimeStub frame
1927 __ ret(0);
1929 // Copy in 32-byte chunks
1930 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1932 inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1933 __ bind(L_exit);
1934 if (is_oop) {
1935 Register end_to = rdx;
1936 __ leaq(end_to, Address(to, dword_count, Address::times_4, -4));
1937 gen_write_ref_array_post_barrier(to, end_to, rax);
1938 }
1939 restore_arg_regs();
1940 __ xorptr(rax, rax); // return 0
1941 __ leave(); // required for proper stackwalking of RuntimeStub frame
1942 __ ret(0);
1944 return start;
1945 }
1947 // Arguments:
1948 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
1949 // ignored
1950 // is_oop - true => oop array, so generate store check code
1951 // name - stub name string
1952 //
1953 // Inputs:
1954 // c_rarg0 - source array address
1955 // c_rarg1 - destination array address
1956 // c_rarg2 - element count, treated as ssize_t, can be zero
1957 //
1958 // Side Effects:
1959 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
1960 // no-overlap entry point used by generate_conjoint_long_oop_copy().
1961 //
1962 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, const char *name) {
1963 __ align(CodeEntryAlignment);
1964 StubCodeMark mark(this, "StubRoutines", name);
1965 address start = __ pc();
1967 Label L_copy_32_bytes, L_copy_8_bytes, L_exit;
1968 const Register from = rdi; // source array address
1969 const Register to = rsi; // destination array address
1970 const Register qword_count = rdx; // elements count
1971 const Register end_from = from; // source array end address
1972 const Register end_to = rcx; // destination array end address
1973 const Register saved_to = to;
1974 // End pointers are inclusive, and if count is not zero they point
1975 // to the last unit copied: end_to[0] := end_from[0]
1977 __ enter(); // required for proper stackwalking of RuntimeStub frame
1978 // Save no-overlap entry point for generate_conjoint_long_oop_copy()
1979 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1981 if (is_oop) {
1982 disjoint_oop_copy_entry = __ pc();
1983 // no registers are destroyed by this call
1984 gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
1985 } else {
1986 disjoint_long_copy_entry = __ pc();
1987 }
1988 BLOCK_COMMENT("Entry:");
1989 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1991 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1992 // r9 and r10 may be used to save non-volatile registers
1994 // 'from', 'to' and 'qword_count' are now valid
1996 // Copy from low to high addresses. Use 'to' as scratch.
1997 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1998 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1999 __ negptr(qword_count);
2000 __ jmp(L_copy_32_bytes);
2002 // Copy trailing qwords
2003 __ BIND(L_copy_8_bytes);
2004 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
2005 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
2006 __ increment(qword_count);
2007 __ jcc(Assembler::notZero, L_copy_8_bytes);
2009 if (is_oop) {
2010 __ jmp(L_exit);
2011 } else {
2012 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
2013 restore_arg_regs();
2014 __ xorptr(rax, rax); // return 0
2015 __ leave(); // required for proper stackwalking of RuntimeStub frame
2016 __ ret(0);
2017 }
2019 // Copy in 32-byte chunks
2020 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
2022 if (is_oop) {
2023 __ BIND(L_exit);
2024 gen_write_ref_array_post_barrier(saved_to, end_to, rax);
2025 inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
2026 } else {
2027 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
2028 }
2029 restore_arg_regs();
2030 __ xorptr(rax, rax); // return 0
2031 __ leave(); // required for proper stackwalking of RuntimeStub frame
2032 __ ret(0);
2034 return start;
2035 }
2037 // Arguments:
2038 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
2039 // ignored
2040 // is_oop - true => oop array, so generate store check code
2041 // name - stub name string
2042 //
2043 // Inputs:
2044 // c_rarg0 - source array address
2045 // c_rarg1 - destination array address
2046 // c_rarg2 - element count, treated as ssize_t, can be zero
2047 //
2048 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, const char *name) {
2049 __ align(CodeEntryAlignment);
2050 StubCodeMark mark(this, "StubRoutines", name);
2051 address start = __ pc();
2053 Label L_copy_32_bytes, L_copy_8_bytes, L_exit;
2054 const Register from = rdi; // source array address
2055 const Register to = rsi; // destination array address
2056 const Register qword_count = rdx; // elements count
2057 const Register saved_count = rcx;
2059 __ enter(); // required for proper stackwalking of RuntimeStub frame
2060 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
2062 address disjoint_copy_entry = NULL;
2063 if (is_oop) {
2064 assert(!UseCompressedOops, "shouldn't be called for compressed oops");
2065 disjoint_copy_entry = disjoint_oop_copy_entry;
2066 oop_copy_entry = __ pc();
2067 array_overlap_test(disjoint_oop_copy_entry, Address::times_8);
2068 } else {
2069 disjoint_copy_entry = disjoint_long_copy_entry;
2070 long_copy_entry = __ pc();
2071 array_overlap_test(disjoint_long_copy_entry, Address::times_8);
2072 }
2073 BLOCK_COMMENT("Entry:");
2074 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2076 array_overlap_test(disjoint_copy_entry, Address::times_8);
2077 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
2078 // r9 and r10 may be used to save non-volatile registers
2080 // 'from', 'to' and 'qword_count' are now valid
2082 if (is_oop) {
2083 // Save to and count for store barrier
2084 __ movptr(saved_count, qword_count);
2085 // No registers are destroyed by this call
2086 gen_write_ref_array_pre_barrier(to, saved_count);
2087 }
2089 __ jmp(L_copy_32_bytes);
2091 // Copy trailing qwords
2092 __ BIND(L_copy_8_bytes);
2093 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
2094 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
2095 __ decrement(qword_count);
2096 __ jcc(Assembler::notZero, L_copy_8_bytes);
2098 if (is_oop) {
2099 __ jmp(L_exit);
2100 } else {
2101 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
2102 restore_arg_regs();
2103 __ xorptr(rax, rax); // return 0
2104 __ leave(); // required for proper stackwalking of RuntimeStub frame
2105 __ ret(0);
2106 }
2108 // Copy in 32-byte chunks
2109 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
2111 if (is_oop) {
2112 __ BIND(L_exit);
2113 __ lea(rcx, Address(to, saved_count, Address::times_8, -8));
2114 gen_write_ref_array_post_barrier(to, rcx, rax);
2115 inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
2116 } else {
2117 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
2118 }
2119 restore_arg_regs();
2120 __ xorptr(rax, rax); // return 0
2121 __ leave(); // required for proper stackwalking of RuntimeStub frame
2122 __ ret(0);
2124 return start;
2125 }
2128 // Helper for generating a dynamic type check.
2129 // Smashes no registers.
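// Rough shape of the emitted check (a sketch only; the authoritative logic is
// MacroAssembler::check_klass_subtype_fast_path/_slow_path): the fast path
// compares super_klass against the word at sub_klass + super_check_offset and
// branches to L_success on a hit; otherwise the slow path scans sub_klass's
// secondary supers list before falling through to L_miss.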
2130 void generate_type_check(Register sub_klass,
2131 Register super_check_offset,
2132 Register super_klass,
2133 Label& L_success) {
2134 assert_different_registers(sub_klass, super_check_offset, super_klass);
2136 BLOCK_COMMENT("type_check:");
2138 Label L_miss;
2140 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
2141 super_check_offset);
2142 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);
2144 // Fall through on failure!
2145 __ BIND(L_miss);
2146 }
2148 //
2149 // Generate checkcasting array copy stub
2150 //
2151 // Input:
2152 // c_rarg0 - source array address
2153 // c_rarg1 - destination array address
2154 // c_rarg2 - element count, treated as ssize_t, can be zero
2155 // c_rarg3 - size_t ckoff (super_check_offset)
2156 // not Win64
2157 // c_rarg4 - oop ckval (super_klass)
2158 // Win64
2159 // rsp+40 - oop ckval (super_klass)
2160 //
2161 // Output:
2162 // rax == 0 - success
2163 // rax == -1^K - failure, where K is partial transfer count
2164 //
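// Hedged caller-side sketch of decoding that return value (illustration only;
// the stub-pointer call is hypothetical):
//
//   intptr_t r = (*checkcast_copy_stub)(from, to, count, ckoff, ckval);
//   if (r != 0) {
//     size_t copied = (size_t)~r;   // K == ~r, since the stub returns -1 ^ K
//     // the first 'copied' elements were stored and card-marked by the stub
//   }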
2165 address generate_checkcast_copy(const char *name) {
2167 Label L_load_element, L_store_element, L_do_card_marks, L_done;
2169 // Input registers (after setup_arg_regs)
2170 const Register from = rdi; // source array address
2171 const Register to = rsi; // destination array address
2172 const Register length = rdx; // elements count
2173 const Register ckoff = rcx; // super_check_offset
2174 const Register ckval = r8; // super_klass
2176 // Registers used as temps (r13, r14 are save-on-entry)
2177 const Register end_from = from; // source array end address
2178 const Register end_to = r13; // destination array end address
2179 const Register count = rdx; // -(count_remaining)
2180 const Register r14_length = r14; // saved copy of length
2181 // End pointers are inclusive, and if length is not zero they point
2182 // to the last unit copied: end_to[0] := end_from[0]
2184 const Register rax_oop = rax; // actual oop copied
2185 const Register r11_klass = r11; // oop._klass
2187 //---------------------------------------------------------------
2188 // Assembler stub will be used for this call to arraycopy
2189 // if the two arrays are subtypes of Object[] but the
2190 // destination array type is not equal to or a supertype
2191 // of the source type. Each element must be separately
2192 // checked.
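// In rough C terms (a sketch only; 'klass_of' and 'is_subtype_of' stand in for
// the real load_klass/type-check machinery used below):
//
//   for (ssize_t i = 0; i < length; i++) {
//     oop o = from[i];
//     if (o != NULL && !is_subtype_of(klass_of(o), dst_element_klass))
//       return ~i;          // i elements were copied before the failure
//     to[i] = o;
//   }
//   return 0;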
2194 __ align(CodeEntryAlignment);
2195 StubCodeMark mark(this, "StubRoutines", name);
2196 address start = __ pc();
2198 __ enter(); // required for proper stackwalking of RuntimeStub frame
2200 #ifdef ASSERT
2201 // caller guarantees that the arrays really are different
2202 // otherwise, we would have to make conjoint checks
2203 { Label L;
2204 array_overlap_test(L, TIMES_OOP);
2205 __ stop("checkcast_copy within a single array");
2206 __ bind(L);
2207 }
2208 #endif //ASSERT
2210 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
2211 // ckoff => rcx, ckval => r8
2212 // r9 and r10 may be used to save non-volatile registers
2213 #ifdef _WIN64
2214 // last argument (#4) is on stack on Win64
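// (32-byte register-parameter shadow area + return address + rbp pushed by
// enter() == 48 bytes, hence rsp + 6 * wordSize)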
2215 __ movptr(ckval, Address(rsp, 6 * wordSize));
2216 #endif
2218 // Caller of this entry point must set up the argument registers.
2219 checkcast_copy_entry = __ pc();
2220 BLOCK_COMMENT("Entry:");
2222 // allocate spill slots for r13, r14
2223 enum {
2224 saved_r13_offset,
2225 saved_r14_offset,
2226 saved_rbp_offset
2227 };
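// saved_rbp_offset (== 2) doubles as the slot count: two words are reserved
// below the rbp already pushed by enter(), one each for r13 and r14.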
2228 __ subptr(rsp, saved_rbp_offset * wordSize);
2229 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
2230 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
2232 // check that int operands are properly extended to size_t
2233 assert_clean_int(length, rax);
2234 assert_clean_int(ckoff, rax);
2236 #ifdef ASSERT
2237 BLOCK_COMMENT("assert consistent ckoff/ckval");
2238 // The ckoff and ckval must be mutually consistent,
2239 // even though caller generates both.
2240 { Label L;
2241 int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
2242 Klass::super_check_offset_offset_in_bytes());
2243 __ cmpl(ckoff, Address(ckval, sco_offset));
2244 __ jcc(Assembler::equal, L);
2245 __ stop("super_check_offset inconsistent");
2246 __ bind(L);
2247 }
2248 #endif //ASSERT
2250 // Loop-invariant addresses. They are exclusive end pointers.
2251 Address end_from_addr(from, length, TIMES_OOP, 0);
2252 Address end_to_addr(to, length, TIMES_OOP, 0);
2253 // Loop-variant addresses. They assume post-incremented count < 0.
2254 Address from_element_addr(end_from, count, TIMES_OOP, 0);
2255 Address to_element_addr(end_to, count, TIMES_OOP, 0);
2257 gen_write_ref_array_pre_barrier(to, count);
2259 // Copy from low to high addresses, indexed from the end of each array.
2260 __ lea(end_from, end_from_addr);
2261 __ lea(end_to, end_to_addr);
2262 __ movptr(r14_length, length); // save a copy of the length
2263 assert(length == count, ""); // else fix next line:
2264 __ negptr(count); // negate and test the length
2265 __ jcc(Assembler::notZero, L_load_element);
2267 // Empty array: Nothing to do.
2268 __ xorptr(rax, rax); // return 0 on (trivial) success
2269 __ jmp(L_done);
2271 // ======== begin loop ========
2272 // (Loop is rotated; its entry is L_load_element.)
2273 // Loop control:
2274 // for (count = -count; count != 0; count++)
2275 // Base pointers src, dst are biased by 8*(count-1), to the last element.
2276 __ align(OptoLoopAlignment);
2278 __ BIND(L_store_element);
2279 __ store_heap_oop(to_element_addr, rax_oop); // store the oop
2280 __ increment(count); // increment the count toward zero
2281 __ jcc(Assembler::zero, L_do_card_marks);
2283 // ======== loop entry is here ========
2284 __ BIND(L_load_element);
2285 __ load_heap_oop(rax_oop, from_element_addr); // load the oop
2286 __ testptr(rax_oop, rax_oop);
2287 __ jcc(Assembler::zero, L_store_element);
2289 __ load_klass(r11_klass, rax_oop);// query the object klass
2290 generate_type_check(r11_klass, ckoff, ckval, L_store_element);
2291 // ======== end loop ========
2293 // It was a real error; we must depend on the caller to finish the job.
2294 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
2295 // Emit GC store barriers for the oops we have copied (r14 + rdx),
2296 // and report their number to the caller.
2297 assert_different_registers(rax, r14_length, count, to, end_to, rcx);
2298 __ lea(end_to, to_element_addr);
2299 __ addptr(end_to, -heapOopSize); // make an inclusive end pointer
2300 gen_write_ref_array_post_barrier(to, end_to, rscratch1);
2301 __ movptr(rax, r14_length); // original oops
2302 __ addptr(rax, count); // K = (original - remaining) oops
2303 __ notptr(rax); // report (-1^K) to caller
2304 __ jmp(L_done);
2306 // Come here on success only.
2307 __ BIND(L_do_card_marks);
2308 __ addptr(end_to, -heapOopSize); // make an inclusive end pointer
2309 gen_write_ref_array_post_barrier(to, end_to, rscratch1);
2310 __ xorptr(rax, rax); // return 0 on success
2312 // Common exit point (success or failure).
2313 __ BIND(L_done);
2314 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
2315 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
2316 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
2317 restore_arg_regs();
2318 __ leave(); // required for proper stackwalking of RuntimeStub frame
2319 __ ret(0);
2321 return start;
2322 }
2324 //
2325 // Generate 'unsafe' array copy stub
2326 // Though just as safe as the other stubs, it takes an unscaled
2327 // size_t argument instead of an element count.
2328 //
2329 // Input:
2330 // c_rarg0 - source array address
2331 // c_rarg1 - destination array address
2332 // c_rarg2 - byte count, treated as ssize_t, can be zero
2333 //
2334 // Examines the alignment of the operands and dispatches
2335 // to a long, int, short, or byte copy loop.
2336 //
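// In rough C terms (a sketch only), the dispatch below is:
//
//   uintptr_t bits = (uintptr_t)from | (uintptr_t)to | size;
//   if      ((bits & (BytesPerLong  - 1)) == 0) long_copy (from, to, size >> LogBytesPerLong);
//   else if ((bits & (BytesPerInt   - 1)) == 0) int_copy  (from, to, size >> LogBytesPerInt);
//   else if ((bits & (BytesPerShort - 1)) == 0) short_copy(from, to, size >> LogBytesPerShort);
//   else                                        byte_copy (from, to, size);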
2337 address generate_unsafe_copy(const char *name) {
2339 Label L_long_aligned, L_int_aligned, L_short_aligned;
2341 // Input registers (before setup_arg_regs)
2342 const Register from = c_rarg0; // source array address
2343 const Register to = c_rarg1; // destination array address
2344 const Register size = c_rarg2; // byte count (size_t)
2346 // Register used as a temp
2347 const Register bits = rax; // test copy of low bits
2349 __ align(CodeEntryAlignment);
2350 StubCodeMark mark(this, "StubRoutines", name);
2351 address start = __ pc();
2353 __ enter(); // required for proper stackwalking of RuntimeStub frame
2355 // bump this on entry, not on exit:
2356 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
2358 __ mov(bits, from);
2359 __ orptr(bits, to);
2360 __ orptr(bits, size);
2362 __ testb(bits, BytesPerLong-1);
2363 __ jccb(Assembler::zero, L_long_aligned);
2365 __ testb(bits, BytesPerInt-1);
2366 __ jccb(Assembler::zero, L_int_aligned);
2368 __ testb(bits, BytesPerShort-1);
2369 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));
2371 __ BIND(L_short_aligned);
2372 __ shrptr(size, LogBytesPerShort); // size => short_count
2373 __ jump(RuntimeAddress(short_copy_entry));
2375 __ BIND(L_int_aligned);
2376 __ shrptr(size, LogBytesPerInt); // size => int_count
2377 __ jump(RuntimeAddress(int_copy_entry));
2379 __ BIND(L_long_aligned);
2380 __ shrptr(size, LogBytesPerLong); // size => qword_count
2381 __ jump(RuntimeAddress(long_copy_entry));
2383 return start;
2384 }
2386 // Perform range checks on the proposed arraycopy.
2387 // Kills temp, but nothing else.
2388 // Also, clean the sign bits of src_pos and dst_pos.
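// Equivalent checks in rough C (a sketch; the unsigned 'above' compares below
// also catch sums that overflow 32 bits):
//
//   if ((juint)(src_pos + length) > (juint)src->length()) goto L_failed;
//   if ((juint)(dst_pos + length) > (juint)dst->length()) goto L_failed;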
2389 void arraycopy_range_checks(Register src, // source array oop (c_rarg0)
2390 Register src_pos, // source position (c_rarg1)
2391 Register dst, // destination array oop (c_rarg2)
2392 Register dst_pos, // destination position (c_rarg3)
2393 Register length,
2394 Register temp,
2395 Label& L_failed) {
2396 BLOCK_COMMENT("arraycopy_range_checks:");
2398 // if (src_pos + length > arrayOop(src)->length()) FAIL;
2399 __ movl(temp, length);
2400 __ addl(temp, src_pos); // src_pos + length
2401 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
2402 __ jcc(Assembler::above, L_failed);
2404 // if (dst_pos + length > arrayOop(dst)->length()) FAIL;
2405 __ movl(temp, length);
2406 __ addl(temp, dst_pos); // dst_pos + length
2407 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
2408 __ jcc(Assembler::above, L_failed);
2410 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
2411 // Move with sign extension can be used since they are positive.
2412 __ movslq(src_pos, src_pos);
2413 __ movslq(dst_pos, dst_pos);
2415 BLOCK_COMMENT("arraycopy_range_checks done");
2416 }
2418 //
2419 // Generate generic array copy stubs
2420 //
2421 // Input:
2422 // c_rarg0 - src oop
2423 // c_rarg1 - src_pos (32-bits)
2424 // c_rarg2 - dst oop
2425 // c_rarg3 - dst_pos (32-bits)
2426 // not Win64
2427 // c_rarg4 - element count (32-bits)
2428 // Win64
2429 // rsp+40 - element count (32-bits)
2430 //
2431 // Output:
2432 // rax == 0 - success
2433 // rax == -1^K - failure, where K is partial transfer count
2434 //
2435 address generate_generic_copy(const char *name) {
2437 Label L_failed, L_failed_0, L_objArray;
2438 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;
2440 // Input registers
2441 const Register src = c_rarg0; // source array oop
2442 const Register src_pos = c_rarg1; // source position
2443 const Register dst = c_rarg2; // destination array oop
2444 const Register dst_pos = c_rarg3; // destination position
2445 #ifndef _WIN64
2446 const Register length = c_rarg4;
2447 #else
2448 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64
2449 #endif
2451 { int modulus = CodeEntryAlignment;
2452 int target = modulus - 5; // 5 = sizeof jmp(L_failed)
2453 int advance = target - (__ offset() % modulus);
2454 if (advance < 0) advance += modulus;
2455 if (advance > 0) __ nop(advance);
2456 }
2457 StubCodeMark mark(this, "StubRoutines", name);
2459 // Short-hop target to L_failed. Makes for denser prologue code.
2460 __ BIND(L_failed_0);
2461 __ jmp(L_failed);
2462 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");
2464 __ align(CodeEntryAlignment);
2465 address start = __ pc();
2467 __ enter(); // required for proper stackwalking of RuntimeStub frame
2469 // bump this on entry, not on exit:
2470 inc_counter_np(SharedRuntime::_generic_array_copy_ctr);
2472 //-----------------------------------------------------------------------
2473 // Assembler stub will be used for this call to arraycopy
2474 // if the following conditions are met:
2475 //
2476 // (1) src and dst must not be null.
2477 // (2) src_pos must not be negative.
2478 // (3) dst_pos must not be negative.
2479 // (4) length must not be negative.
2480 // (5) src klass and dst klass should be the same and not NULL.
2481 // (6) src and dst should be arrays.
2482 // (7) src_pos + length must not exceed length of src.
2483 // (8) dst_pos + length must not exceed length of dst.
2484 //
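// Compact sketch of how the code below realizes these checks (illustration only):
//
//   if (src == NULL || src_pos < 0 || dst == NULL || dst_pos < 0 || length < 0)
//     return -1;                                   // conditions (1) - (4)
//   // (5), (6): klass and layout-helper tests, objArray vs. typeArray
//   // (7), (8): arraycopy_range_checks(), which branches to L_failed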
2486 // if (src == NULL) return -1;
2487 __ testptr(src, src); // src oop
2488 size_t j1off = __ offset();
2489 __ jccb(Assembler::zero, L_failed_0);
2491 // if (src_pos < 0) return -1;
2492 __ testl(src_pos, src_pos); // src_pos (32-bits)
2493 __ jccb(Assembler::negative, L_failed_0);
2495 // if (dst == NULL) return -1;
2496 __ testptr(dst, dst); // dst oop
2497 __ jccb(Assembler::zero, L_failed_0);
2499 // if (dst_pos < 0) return -1;
2500 __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
2501 size_t j4off = __ offset();
2502 __ jccb(Assembler::negative, L_failed_0);
2504 // The first four tests are very dense code,
2505 // but not quite dense enough to put four
2506 // jumps in a 16-byte instruction fetch buffer.
2507 // That's good, because some branch predictors
2508 // do not like jumps so close together.
2509 // Make sure of this.
2510 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");
2512 // registers used as temp
2513 const Register r11_length = r11; // elements count to copy
2514 const Register r10_src_klass = r10; // array klass
2516 // if (length < 0) return -1;
2517 __ movl(r11_length, length); // length (elements count, 32-bits value)
2518 __ testl(r11_length, r11_length);
2519 __ jccb(Assembler::negative, L_failed_0);
2521 __ load_klass(r10_src_klass, src);
2522 #ifdef ASSERT
2523 // assert(src->klass() != NULL);
2524 {
2525 BLOCK_COMMENT("assert klasses not null {");
2526 Label L1, L2;
2527 __ testptr(r10_src_klass, r10_src_klass);
2528 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL
2529 __ bind(L1);
2530 __ stop("broken null klass");
2531 __ bind(L2);
2532 __ load_klass(rax, dst);
2533 __ cmpq(rax, 0);
2534 __ jcc(Assembler::equal, L1); // this would be broken also
2535 BLOCK_COMMENT("} assert klasses not null done");
2536 }
2537 #endif
2539 // Load layout helper (32-bits)
2540 //
2541 //  |array_tag|     | header_size | element_type |     |log2_element_size|
2542 //   32        30    24            16             8     2                 0
2543 //
2544 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2545 //
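// Illustrative decoding of those fields (the Klass::_lh_* constants are the
// same ones used by the shifts and masks below; see klass.hpp):
//
//   int tag         = ((juint)lh) >> Klass::_lh_array_tag_shift;     // 0x3 typeArray, 0x2 objArray
//   int hdr_size    = (lh >> Klass::_lh_header_size_shift) & Klass::_lh_header_size_mask;
//   int log2_elsize =  lh & Klass::_lh_log2_element_size_mask;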
2547 const int lh_offset = klassOopDesc::header_size() * HeapWordSize +
2548 Klass::layout_helper_offset_in_bytes();
2550 // Handle objArrays completely differently...
2551 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2552 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
2553 __ jcc(Assembler::equal, L_objArray);
2555 // if (src->klass() != dst->klass()) return -1;
2556 __ load_klass(rax, dst);
2557 __ cmpq(r10_src_klass, rax);
2558 __ jcc(Assembler::notEqual, L_failed);
2560 const Register rax_lh = rax; // layout helper
2561 __ movl(rax_lh, Address(r10_src_klass, lh_offset));
2563 // if (!src->is_Array()) return -1;
2564 __ cmpl(rax_lh, Klass::_lh_neutral_value);
2565 __ jcc(Assembler::greaterEqual, L_failed);
2567 // At this point, it is known to be a typeArray (array_tag 0x3).
2568 #ifdef ASSERT
2569 {
2570 BLOCK_COMMENT("assert primitive array {");
2571 Label L;
2572 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
2573 __ jcc(Assembler::greaterEqual, L);
2574 __ stop("must be a primitive array");
2575 __ bind(L);
2576 BLOCK_COMMENT("} assert primitive array done");
2577 }
2578 #endif
2580 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2581 r10, L_failed);
2583 // typeArrayKlass
2584 //
2585 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2586 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2587 //
2589 const Register r10_offset = r10; // array offset
2590 const Register rax_elsize = rax_lh; // element size
2592 __ movl(r10_offset, rax_lh);
2593 __ shrl(r10_offset, Klass::_lh_header_size_shift);
2594 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset
2595 __ addptr(src, r10_offset); // src array offset
2596 __ addptr(dst, r10_offset); // dst array offset
2597 BLOCK_COMMENT("choose copy loop based on element size");
2598 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize
2600 // next registers should be set before the jump to corresponding stub
2601 const Register from = c_rarg0; // source array address
2602 const Register to = c_rarg1; // destination array address
2603 const Register count = c_rarg2; // elements count
2605 // 'from', 'to', 'count' registers should be set in such order
2606 // since they are the same as 'src', 'src_pos', 'dst'.
2608 __ BIND(L_copy_bytes);
2609 __ cmpl(rax_elsize, 0);
2610 __ jccb(Assembler::notEqual, L_copy_shorts);
2611 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr
2612 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr
2613 __ movl2ptr(count, r11_length); // length
2614 __ jump(RuntimeAddress(byte_copy_entry));
2616 __ BIND(L_copy_shorts);
2617 __ cmpl(rax_elsize, LogBytesPerShort);
2618 __ jccb(Assembler::notEqual, L_copy_ints);
2619 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr
2620 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr
2621 __ movl2ptr(count, r11_length); // length
2622 __ jump(RuntimeAddress(short_copy_entry));
2624 __ BIND(L_copy_ints);
2625 __ cmpl(rax_elsize, LogBytesPerInt);
2626 __ jccb(Assembler::notEqual, L_copy_longs);
2627 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr
2628 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr
2629 __ movl2ptr(count, r11_length); // length
2630 __ jump(RuntimeAddress(int_copy_entry));
2632 __ BIND(L_copy_longs);
2633 #ifdef ASSERT
2634 {
2635 BLOCK_COMMENT("assert long copy {");
2636 Label L;
2637 __ cmpl(rax_elsize, LogBytesPerLong);
2638 __ jcc(Assembler::equal, L);
2639 __ stop("must be long copy, but elsize is wrong");
2640 __ bind(L);
2641 BLOCK_COMMENT("} assert long copy done");
2642 }
2643 #endif
2644 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr
2645 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr
2646 __ movl2ptr(count, r11_length); // length
2647 __ jump(RuntimeAddress(long_copy_entry));
2649 // objArrayKlass
2650 __ BIND(L_objArray);
2651 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos]
2653 Label L_plain_copy, L_checkcast_copy;
2654 // test array classes for subtyping
2655 __ load_klass(rax, dst);
2656 __ cmpq(r10_src_klass, rax); // usual case is exact equality
2657 __ jcc(Assembler::notEqual, L_checkcast_copy);
2659 // Identically typed arrays can be copied without element-wise checks.
2660 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2661 r10, L_failed);
2663 __ lea(from, Address(src, src_pos, TIMES_OOP,
2664 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
2665 __ lea(to, Address(dst, dst_pos, TIMES_OOP,
2666 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
2667 __ movl2ptr(count, r11_length); // length
2668 __ BIND(L_plain_copy);
2669 __ jump(RuntimeAddress(oop_copy_entry));
2671 __ BIND(L_checkcast_copy);
2672 // live at this point: r10_src_klass, r11_length, rax (dst_klass)
2673 {
2674 // Before looking at dst.length, make sure dst is also an objArray.
2675 __ cmpl(Address(rax, lh_offset), objArray_lh);
2676 __ jcc(Assembler::notEqual, L_failed);
2678 // It is safe to examine both src.length and dst.length.
2679 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2680 rax, L_failed);
2682 const Register r11_dst_klass = r11;
2683 __ load_klass(r11_dst_klass, dst); // reload
2685 // Marshal the base address arguments now, freeing registers.
2686 __ lea(from, Address(src, src_pos, TIMES_OOP,
2687 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2688 __ lea(to, Address(dst, dst_pos, TIMES_OOP,
2689 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2690 __ movl(count, length); // length (reloaded)
2691 Register sco_temp = c_rarg3; // this register is free now
2692 assert_different_registers(from, to, count, sco_temp,
2693 r11_dst_klass, r10_src_klass);
2694 assert_clean_int(count, sco_temp);
2696 // Generate the type check.
2697 const int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
2698 Klass::super_check_offset_offset_in_bytes());
2699 __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
2700 assert_clean_int(sco_temp, rax);
2701 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);
2703 // Fetch destination element klass from the objArrayKlass header.
2704 int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
2705 objArrayKlass::element_klass_offset_in_bytes());
2706 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
2707 __ movl( sco_temp, Address(r11_dst_klass, sco_offset));
2708 assert_clean_int(sco_temp, rax);
2710 // the checkcast_copy loop needs two extra arguments:
2711 assert(c_rarg3 == sco_temp, "#3 already in place");
2712 // Set up arguments for checkcast_copy_entry.
2713 setup_arg_regs(4);
2714 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris
2715 __ jump(RuntimeAddress(checkcast_copy_entry));
2716 }
2718 __ BIND(L_failed);
2719 __ xorptr(rax, rax);
2720 __ notptr(rax); // return -1
2721 __ leave(); // required for proper stackwalking of RuntimeStub frame
2722 __ ret(0);
2724 return start;
2725 }
2727 void generate_arraycopy_stubs() {
2728 // Call the conjoint generation methods immediately after
2729 // the disjoint ones so that short branches from the former
2730 // to the latter can be generated.
2731 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
2732 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
2734 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
2735 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy");
2737 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, "jint_disjoint_arraycopy");
2738 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, "jint_arraycopy");
2740 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, "jlong_disjoint_arraycopy");
2741 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, "jlong_arraycopy");
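// With compressed oops a heap reference is a 32-bit narrowOop, so the int
// copy stubs double as the oop copy stubs; otherwise oops are full 64-bit
// words and the long copy stubs are used instead.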
2744 if (UseCompressedOops) {
2745 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, "oop_disjoint_arraycopy");
2746 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, "oop_arraycopy");
2747 } else {
2748 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, "oop_disjoint_arraycopy");
2749 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, "oop_arraycopy");
2750 }
2752 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
2753 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy");
2754 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy");
2756 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
2757 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
2758 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
2759 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
2760 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
2761 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
2763 // We don't generate specialized code for HeapWord-aligned source
2764 // arrays, so just use the code we've already generated
2765 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy;
2766 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy;
2768 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
2769 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy;
2771 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy;
2772 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy;
2774 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy;
2775 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;
2777 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy;
2778 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;
2779 }
2781 void generate_math_stubs() {
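// Each stub below follows the same pattern: spill the incoming xmm0 argument
// to the stack, load it onto the x87 stack with fld_d, apply the x87
// operation (flog, flog10, or trigfunc), store the result with fstp_d, and
// reload it into xmm0 as the SSE return value.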
2782 {
2783 StubCodeMark mark(this, "StubRoutines", "log");
2784 StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();
2786 __ subq(rsp, 8);
2787 __ movdbl(Address(rsp, 0), xmm0);
2788 __ fld_d(Address(rsp, 0));
2789 __ flog();
2790 __ fstp_d(Address(rsp, 0));
2791 __ movdbl(xmm0, Address(rsp, 0));
2792 __ addq(rsp, 8);
2793 __ ret(0);
2794 }
2795 {
2796 StubCodeMark mark(this, "StubRoutines", "log10");
2797 StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();
2799 __ subq(rsp, 8);
2800 __ movdbl(Address(rsp, 0), xmm0);
2801 __ fld_d(Address(rsp, 0));
2802 __ flog10();
2803 __ fstp_d(Address(rsp, 0));
2804 __ movdbl(xmm0, Address(rsp, 0));
2805 __ addq(rsp, 8);
2806 __ ret(0);
2807 }
2808 {
2809 StubCodeMark mark(this, "StubRoutines", "sin");
2810 StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();
2812 __ subq(rsp, 8);
2813 __ movdbl(Address(rsp, 0), xmm0);
2814 __ fld_d(Address(rsp, 0));
2815 __ trigfunc('s');
2816 __ fstp_d(Address(rsp, 0));
2817 __ movdbl(xmm0, Address(rsp, 0));
2818 __ addq(rsp, 8);
2819 __ ret(0);
2820 }
2821 {
2822 StubCodeMark mark(this, "StubRoutines", "cos");
2823 StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();
2825 __ subq(rsp, 8);
2826 __ movdbl(Address(rsp, 0), xmm0);
2827 __ fld_d(Address(rsp, 0));
2828 __ trigfunc('c');
2829 __ fstp_d(Address(rsp, 0));
2830 __ movdbl(xmm0, Address(rsp, 0));
2831 __ addq(rsp, 8);
2832 __ ret(0);
2833 }
2834 {
2835 StubCodeMark mark(this, "StubRoutines", "tan");
2836 StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();
2838 __ subq(rsp, 8);
2839 __ movdbl(Address(rsp, 0), xmm0);
2840 __ fld_d(Address(rsp, 0));
2841 __ trigfunc('t');
2842 __ fstp_d(Address(rsp, 0));
2843 __ movdbl(xmm0, Address(rsp, 0));
2844 __ addq(rsp, 8);
2845 __ ret(0);
2846 }
2848 // The intrinsic versions of these seem to return the same value as
2849 // the strict version.
2850 StubRoutines::_intrinsic_exp = SharedRuntime::dexp;
2851 StubRoutines::_intrinsic_pow = SharedRuntime::dpow;
2852 }
2854 #undef __
2855 #define __ masm->
2857 // Continuation point for throwing of implicit exceptions that are
2858 // not handled in the current activation. Fabricates an exception
2859 // oop and initiates normal exception dispatching in this
2860 // frame. Since we need to preserve callee-saved values (currently
2861 // only for C2, but done for C1 as well) we need a callee-saved oop
2862 // map and therefore have to make these stubs into RuntimeStubs
2863 // rather than BufferBlobs. If the compiler needs all registers to
2864 // be preserved between the fault point and the exception handler
2865 // then it must assume responsibility for that in
2866 // AbstractCompiler::continuation_for_implicit_null_exception or
2867 // continuation_for_implicit_division_by_zero_exception. All other
2868 // implicit exceptions (e.g., NullPointerException or
2869 // AbstractMethodError on entry) are either at call sites or
2870 // otherwise assume that stack unwinding will be initiated, so
2871 // caller saved registers were assumed volatile in the compiler.
2872 address generate_throw_exception(const char* name,
2873 address runtime_entry,
2874 bool restore_saved_exception_pc) {
2875 // Information about frame layout at time of blocking runtime call.
2876 // Note that we only have to preserve callee-saved registers since
2877 // the compilers are responsible for supplying a continuation point
2878 // if they expect all registers to be preserved.
2879 enum layout {
2880 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
2881 rbp_off2,
2882 return_off,
2883 return_off2,
2884 framesize // inclusive of return address
2885 };
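// Resulting frame, in 32-bit slots relative to rsp after the prolog below
// (a sketch derived from the enum above):
//   [rsp + return_off * BytesPerInt]  return address
//   [rsp + rbp_off    * BytesPerInt]  rbp pushed by enter()
//   [rsp + 0]                         Win64 register-argument shadow area
//                                     (frame::arg_reg_save_area_bytes; empty elsewhere)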
2887 int insts_size = 512;
2888 int locs_size = 64;
2890 CodeBuffer code(name, insts_size, locs_size);
2891 OopMapSet* oop_maps = new OopMapSet();
2892 MacroAssembler* masm = new MacroAssembler(&code);
2894 address start = __ pc();
2896 // This is an inlined and slightly modified version of call_VM
2897 // which has the ability to fetch the return PC out of
2898 // thread-local storage and also sets up last_Java_sp slightly
2899 // differently than the real call_VM
2900 if (restore_saved_exception_pc) {
2901 __ movptr(rax,
2902 Address(r15_thread,
2903 in_bytes(JavaThread::saved_exception_pc_offset())));
2904 __ push(rax);
2905 }
2907 __ enter(); // required for proper stackwalking of RuntimeStub frame
2909 assert(is_even(framesize/2), "sp not 16-byte aligned");
2911 // return address and rbp are already in place
2912 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog
2914 int frame_complete = __ pc() - start;
2916 // Set up last_Java_sp and last_Java_fp
2917 __ set_last_Java_frame(rsp, rbp, NULL);
2919 // Call runtime
2920 __ movptr(c_rarg0, r15_thread);
2921 BLOCK_COMMENT("call runtime_entry");
2922 __ call(RuntimeAddress(runtime_entry));
2924 // Generate oop map
2925 OopMap* map = new OopMap(framesize, 0);
2927 oop_maps->add_gc_map(__ pc() - start, map);
2929 __ reset_last_Java_frame(true, false);
2931 __ leave(); // required for proper stackwalking of RuntimeStub frame
2933 // check for pending exceptions
2934 #ifdef ASSERT
2935 Label L;
2936 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
2937 (int32_t) NULL_WORD);
2938 __ jcc(Assembler::notEqual, L);
2939 __ should_not_reach_here();
2940 __ bind(L);
2941 #endif // ASSERT
2942 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2945 // codeBlob framesize is in words (not VMRegImpl::slot_size)
2946 RuntimeStub* stub =
2947 RuntimeStub::new_runtime_stub(name,
2948 &code,
2949 frame_complete,
2950 (framesize >> (LogBytesPerWord - LogBytesPerInt)),
2951 oop_maps, false);
2952 return stub->entry_point();
2953 }
2955 // Initialization
2956 void generate_initial() {
2957 // Generates all stubs and initializes the entry points
2959 // This platform-specific stub is needed by generate_call_stub()
2960 StubRoutines::x86::_mxcsr_std = generate_fp_mask("mxcsr_std", 0x0000000000001F80);
2962 // entry points that exist in all platforms. Note: This is code
2963 // that could be shared among different platforms - however the
2964 // benefit seems to be smaller than the disadvantage of having a
2965 // much more complicated generator structure. See also comment in
2966 // stubRoutines.hpp.
2968 StubRoutines::_forward_exception_entry = generate_forward_exception();
2970 StubRoutines::_call_stub_entry =
2971 generate_call_stub(StubRoutines::_call_stub_return_address);
2973 // is referenced by megamorphic call
2974 StubRoutines::_catch_exception_entry = generate_catch_exception();
2976 // atomic calls
2977 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
2978 StubRoutines::_atomic_xchg_ptr_entry = generate_atomic_xchg_ptr();
2979 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
2980 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
2981 StubRoutines::_atomic_add_entry = generate_atomic_add();
2982 StubRoutines::_atomic_add_ptr_entry = generate_atomic_add_ptr();
2983 StubRoutines::_fence_entry = generate_orderaccess_fence();
2985 StubRoutines::_handler_for_unsafe_access_entry =
2986 generate_handler_for_unsafe_access();
2988 // platform dependent
2989 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
2991 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
2992 }
2994 void generate_all() {
2995 // Generates all stubs and initializes the entry points
2997 // These entry points require SharedInfo::stack0 to be set up in
2998 // non-core builds and need to be relocatable, so they each
2999 // fabricate a RuntimeStub internally.
3000 StubRoutines::_throw_AbstractMethodError_entry =
3001 generate_throw_exception("AbstractMethodError throw_exception",
3002 CAST_FROM_FN_PTR(address,
3003 SharedRuntime::
3004 throw_AbstractMethodError),
3005 false);
3007 StubRoutines::_throw_IncompatibleClassChangeError_entry =
3008 generate_throw_exception("IncompatibleClassChangeError throw_exception",
3009 CAST_FROM_FN_PTR(address,
3010 SharedRuntime::
3011 throw_IncompatibleClassChangeError),
3012 false);
3014 StubRoutines::_throw_ArithmeticException_entry =
3015 generate_throw_exception("ArithmeticException throw_exception",
3016 CAST_FROM_FN_PTR(address,
3017 SharedRuntime::
3018 throw_ArithmeticException),
3019 true);
3021 StubRoutines::_throw_NullPointerException_entry =
3022 generate_throw_exception("NullPointerException throw_exception",
3023 CAST_FROM_FN_PTR(address,
3024 SharedRuntime::
3025 throw_NullPointerException),
3026 true);
3028 StubRoutines::_throw_NullPointerException_at_call_entry =
3029 generate_throw_exception("NullPointerException at call throw_exception",
3030 CAST_FROM_FN_PTR(address,
3031 SharedRuntime::
3032 throw_NullPointerException_at_call),
3033 false);
3035 StubRoutines::_throw_StackOverflowError_entry =
3036 generate_throw_exception("StackOverflowError throw_exception",
3037 CAST_FROM_FN_PTR(address,
3038 SharedRuntime::
3039 throw_StackOverflowError),
3040 false);
3042 // entry points that are platform specific
3043 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
3044 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
3045 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
3046 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();
3048 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF);
3049 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000);
3050 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
3051 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
3053 // support for verify_oop (must happen after universe_init)
3054 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
3056 // arraycopy stubs used by compilers
3057 generate_arraycopy_stubs();
3059 generate_math_stubs();
3060 }
3062 public:
3063 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
3064 if (all) {
3065 generate_all();
3066 } else {
3067 generate_initial();
3068 }
3069 }
3070 }; // end class declaration
3072 address StubGenerator::disjoint_byte_copy_entry = NULL;
3073 address StubGenerator::disjoint_short_copy_entry = NULL;
3074 address StubGenerator::disjoint_int_copy_entry = NULL;
3075 address StubGenerator::disjoint_long_copy_entry = NULL;
3076 address StubGenerator::disjoint_oop_copy_entry = NULL;
3078 address StubGenerator::byte_copy_entry = NULL;
3079 address StubGenerator::short_copy_entry = NULL;
3080 address StubGenerator::int_copy_entry = NULL;
3081 address StubGenerator::long_copy_entry = NULL;
3082 address StubGenerator::oop_copy_entry = NULL;
3084 address StubGenerator::checkcast_copy_entry = NULL;
3086 void StubGenerator_generate(CodeBuffer* code, bool all) {
3087 StubGenerator g(code, all);
3088 }